2024-12-11 06:10:20,760 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-12-11 06:10:20,780 main DEBUG Took 0.017321 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-11 06:10:20,781 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-11 06:10:20,781 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-11 06:10:20,783 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-11 06:10:20,785 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,793 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-11 06:10:20,811 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,813 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,814 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,814 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,815 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,819 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,820 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,820 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,821 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,822 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,823 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,823 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,824 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,824 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-11 06:10:20,825 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,825 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,829 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,829 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,830 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,831 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,832 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,834 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,836 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,837 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 06:10:20,838 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,838 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-11 06:10:20,854 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 06:10:20,856 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-11 06:10:20,858 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-11 06:10:20,859 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
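The LoggerConfig builders above come from the log4j2.properties bundled in the hbase-logging tests jar, loaded here as a PropertiesConfiguration. A simplified, hypothetical reconstruction of the kind of properties file that would produce these builders is sketched below; the logger names, levels, and layout pattern are taken from the entries in this log, but the real file wires HBase's custom HBaseTestAppender (reported a few entries further down) rather than a stock Console appender, so treat this only as an illustration.

```properties
# Hypothetical reconstruction (illustration only); names, levels and the layout
# pattern are copied from the LoggerConfig/PatternLayout entries in this log.
appender.console.type = Console
appender.console.name = Console
appender.console.target = SYSTEM_ERR
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

rootLogger.level = INFO
rootLogger.appenderRef.console.ref = Console

# Per-package overrides seen above (a subset; the remaining loggers follow the same form).
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR
logger.nettychannel.name = org.apache.hbase.thirdparty.io.netty.channel
logger.nettychannel.level = DEBUG
```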
2024-12-11 06:10:20,860 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-11 06:10:20,861 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-11 06:10:20,890 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-11 06:10:20,894 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-11 06:10:20,896 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-11 06:10:20,897 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-11 06:10:20,897 main DEBUG createAppenders(={Console}) 2024-12-11 06:10:20,904 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized 2024-12-11 06:10:20,904 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-12-11 06:10:20,905 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK. 2024-12-11 06:10:20,905 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-11 06:10:20,906 main DEBUG OutputStream closed 2024-12-11 06:10:20,906 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-11 06:10:20,907 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-11 06:10:20,907 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK 2024-12-11 06:10:21,068 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-11 06:10:21,073 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-11 06:10:21,082 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-11 06:10:21,083 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-11 06:10:21,084 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-11 06:10:21,084 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-11 06:10:21,085 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-11 06:10:21,085 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-11 06:10:21,085 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-11 06:10:21,086 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-11 06:10:21,086 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-11 06:10:21,087 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-11 06:10:21,087 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-11 06:10:21,088 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-11 06:10:21,088 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-11 06:10:21,089 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-11 06:10:21,089 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-11 06:10:21,090 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-11 06:10:21,108 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-11 06:10:21,109 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null 2024-12-11 06:10:21,114 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-11 06:10:21,116 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK. 2024-12-11T06:10:21,139 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-11 06:10:21,144 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-11 06:10:21,145 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-11T06:10:21,534 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df 2024-12-11T06:10:21,535 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins 2024-12-11T06:10:21,537 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins 2024-12-11T06:10:21,611 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-11T06:10:21,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T06:10:21,958 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462, deleteOnExit=true 2024-12-11T06:10:21,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-11T06:10:21,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/test.cache.data in system properties and HBase conf 2024-12-11T06:10:21,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T06:10:21,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir in system properties and HBase conf 2024-12-11T06:10:21,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T06:10:21,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T06:10:21,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T06:10:22,119 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T06:10:22,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T06:10:22,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T06:10:22,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T06:10:22,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T06:10:22,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T06:10:22,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T06:10:22,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T06:10:22,135 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T06:10:22,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T06:10:22,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/nfs.dump.dir in system properties and HBase conf 2024-12-11T06:10:22,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/java.io.tmpdir in system properties and HBase conf 2024-12-11T06:10:22,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T06:10:22,138 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T06:10:22,139 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T06:10:23,390 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-11T06:10:23,488 INFO [Time-limited test {}] log.Log(170): Logging initialized @4039ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-11T06:10:23,597 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:23,705 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T06:10:23,743 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T06:10:23,744 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T06:10:23,746 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T06:10:23,765 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:23,768 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41878587{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,AVAILABLE} 2024-12-11T06:10:23,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20e934eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T06:10:24,047 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44c957a2{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/java.io.tmpdir/jetty-localhost-34241-hadoop-hdfs-3_4_1-tests_jar-_-any-2750032233736162660/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T06:10:24,057 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:34241} 2024-12-11T06:10:24,057 INFO [Time-limited test {}] server.Server(415): Started @4609ms 2024-12-11T06:10:24,533 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:24,544 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T06:10:24,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T06:10:24,545 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T06:10:24,546 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T06:10:24,547 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@394d89b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,AVAILABLE} 2024-12-11T06:10:24,547 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27a35a11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T06:10:24,722 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6b3e82e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/java.io.tmpdir/jetty-localhost-46863-hadoop-hdfs-3_4_1-tests_jar-_-any-15839102802259409481/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T06:10:24,724 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:46863} 2024-12-11T06:10:24,725 INFO [Time-limited test {}] server.Server(415): Started @5276ms 2024-12-11T06:10:24,813 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T06:10:25,039 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:25,046 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T06:10:25,070 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T06:10:25,070 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T06:10:25,071 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T06:10:25,075 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f30d4b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,AVAILABLE} 2024-12-11T06:10:25,078 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca589bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T06:10:25,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@213fabce{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/java.io.tmpdir/jetty-localhost-39435-hadoop-hdfs-3_4_1-tests_jar-_-any-8336184894245374428/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T06:10:25,283 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:39435} 2024-12-11T06:10:25,284 INFO [Time-limited test {}] server.Server(415): Started @5835ms 2024-12-11T06:10:25,289 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T06:10:25,431 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:25,451 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T06:10:25,464 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T06:10:25,465 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T06:10:25,465 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T06:10:25,477 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@693ee7fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,AVAILABLE} 2024-12-11T06:10:25,478 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@bc523e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T06:10:25,545 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1/current/BP-723867772-172.17.0.2-1733897423063/current, will proceed with Du for space computation calculation, 2024-12-11T06:10:25,548 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2/current/BP-723867772-172.17.0.2-1733897423063/current, will proceed with Du for space computation calculation, 2024-12-11T06:10:25,564 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3/current/BP-723867772-172.17.0.2-1733897423063/current, will proceed with Du for space computation calculation, 2024-12-11T06:10:25,579 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4/current/BP-723867772-172.17.0.2-1733897423063/current, will proceed with Du for space computation calculation, 2024-12-11T06:10:25,700 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@68703158{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/java.io.tmpdir/jetty-localhost-34795-hadoop-hdfs-3_4_1-tests_jar-_-any-8641290008843436444/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T06:10:25,701 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@42021b37{HTTP/1.1, 
(http/1.1)}{localhost:34795} 2024-12-11T06:10:25,701 INFO [Time-limited test {}] server.Server(415): Started @6253ms 2024-12-11T06:10:25,705 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T06:10:25,738 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T06:10:25,742 WARN [Thread-83 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T06:10:25,830 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7b57494ebaef7b1a with lease ID 0x512a4139138bbb5b: Processing first storage report for DS-fbe345be-5279-421e-a264-2bb8b6b82afb from datanode DatanodeRegistration(127.0.0.1:36519, datanodeUuid=72cee194-7e77-452d-85dc-f786cfd4c1a8, infoPort=37187, infoSecurePort=0, ipcPort=38485, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063) 2024-12-11T06:10:25,832 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7b57494ebaef7b1a with lease ID 0x512a4139138bbb5b: from storage DS-fbe345be-5279-421e-a264-2bb8b6b82afb node DatanodeRegistration(127.0.0.1:36519, datanodeUuid=72cee194-7e77-452d-85dc-f786cfd4c1a8, infoPort=37187, infoSecurePort=0, ipcPort=38485, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-11T06:10:25,833 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7b57494ebaef7b1a with lease ID 0x512a4139138bbb5b: Processing first storage report for DS-55ef1aa3-b393-499d-bf46-f15fc5209116 from datanode DatanodeRegistration(127.0.0.1:36519, datanodeUuid=72cee194-7e77-452d-85dc-f786cfd4c1a8, infoPort=37187, infoSecurePort=0, ipcPort=38485, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063) 2024-12-11T06:10:25,833 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7b57494ebaef7b1a with lease ID 0x512a4139138bbb5b: from storage DS-55ef1aa3-b393-499d-bf46-f15fc5209116 node DatanodeRegistration(127.0.0.1:36519, datanodeUuid=72cee194-7e77-452d-85dc-f786cfd4c1a8, infoPort=37187, infoSecurePort=0, ipcPort=38485, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T06:10:25,845 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf63bacd6fd4e6683 with lease ID 0x512a4139138bbb5a: Processing first storage report for DS-f5503314-a73e-487a-bfb6-009f9d2b7f11 from datanode DatanodeRegistration(127.0.0.1:33445, datanodeUuid=a0cb6457-4909-410b-b27c-791da41291ae, infoPort=34059, infoSecurePort=0, ipcPort=46725, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063) 2024-12-11T06:10:25,846 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf63bacd6fd4e6683 with lease ID 0x512a4139138bbb5a: from storage DS-f5503314-a73e-487a-bfb6-009f9d2b7f11 node DatanodeRegistration(127.0.0.1:33445, datanodeUuid=a0cb6457-4909-410b-b27c-791da41291ae, infoPort=34059, infoSecurePort=0, ipcPort=46725, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063), blocks: 0, hasStaleStorage: true, 
processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T06:10:25,846 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf63bacd6fd4e6683 with lease ID 0x512a4139138bbb5a: Processing first storage report for DS-0f9700f5-29dc-4d2f-934e-7deba4afd1c7 from datanode DatanodeRegistration(127.0.0.1:33445, datanodeUuid=a0cb6457-4909-410b-b27c-791da41291ae, infoPort=34059, infoSecurePort=0, ipcPort=46725, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063) 2024-12-11T06:10:25,846 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf63bacd6fd4e6683 with lease ID 0x512a4139138bbb5a: from storage DS-0f9700f5-29dc-4d2f-934e-7deba4afd1c7 node DatanodeRegistration(127.0.0.1:33445, datanodeUuid=a0cb6457-4909-410b-b27c-791da41291ae, infoPort=34059, infoSecurePort=0, ipcPort=46725, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T06:10:25,932 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5/current/BP-723867772-172.17.0.2-1733897423063/current, will proceed with Du for space computation calculation, 2024-12-11T06:10:25,952 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6/current/BP-723867772-172.17.0.2-1733897423063/current, will proceed with Du for space computation calculation, 2024-12-11T06:10:25,988 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T06:10:25,998 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc6fbc6a77a10ded with lease ID 0x512a4139138bbb5c: Processing first storage report for DS-19137002-3163-45ba-9dc1-e7f6de3d535e from datanode DatanodeRegistration(127.0.0.1:37875, datanodeUuid=75e5eb78-af4b-40fd-a136-a194224b2e9f, infoPort=45813, infoSecurePort=0, ipcPort=37315, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063) 2024-12-11T06:10:25,999 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc6fbc6a77a10ded with lease ID 0x512a4139138bbb5c: from storage DS-19137002-3163-45ba-9dc1-e7f6de3d535e node DatanodeRegistration(127.0.0.1:37875, datanodeUuid=75e5eb78-af4b-40fd-a136-a194224b2e9f, infoPort=45813, infoSecurePort=0, ipcPort=37315, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T06:10:25,999 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc6fbc6a77a10ded with lease ID 0x512a4139138bbb5c: Processing first storage report for DS-191fedd1-dc19-4d92-9a8b-41ac634de558 from datanode DatanodeRegistration(127.0.0.1:37875, datanodeUuid=75e5eb78-af4b-40fd-a136-a194224b2e9f, infoPort=45813, infoSecurePort=0, ipcPort=37315, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063) 2024-12-11T06:10:26,000 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc6fbc6a77a10ded with lease ID 0x512a4139138bbb5c: from storage DS-191fedd1-dc19-4d92-9a8b-41ac634de558 node DatanodeRegistration(127.0.0.1:37875, datanodeUuid=75e5eb78-af4b-40fd-a136-a194224b2e9f, infoPort=45813, infoSecurePort=0, ipcPort=37315, storageInfo=lv=-57;cid=testClusterID;nsid=2145326054;c=1733897423063), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T06:10:26,289 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df 2024-12-11T06:10:26,450 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/zookeeper_0, clientPort=62516, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T06:10:26,465 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62516 2024-12-11T06:10:26,493 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
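At this point the mini HDFS (3 datanodes) and the MiniZooKeeperCluster on clientPort=62516 are up, matching the StartMiniClusterOption reported earlier (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1). The following is a minimal, assumption-based sketch of how a test would request that topology through HBaseTestingUtil; it is not code from TestExportSnapshot itself, and the builder method names are inferred from the option string printed in the log.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the option string logged by HBaseTestingUtil(805):
    // 1 master, 3 region servers, 3 data nodes, 1 ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);
    try {
      // ... exercise the cluster (e.g. take and export snapshots in these tests) ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```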
2024-12-11T06:10:26,497 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:26,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741825_1001 (size=7) 2024-12-11T06:10:26,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741825_1001 (size=7) 2024-12-11T06:10:26,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741825_1001 (size=7) 2024-12-11T06:10:27,247 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b with version=8 2024-12-11T06:10:27,247 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/hbase-staging 2024-12-11T06:10:27,399 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-11T06:10:27,744 INFO [Time-limited test {}] client.ConnectionUtils(128): master/b66397bb9319:0 server-side Connection retries=45 2024-12-11T06:10:27,759 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:27,760 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:27,767 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T06:10:27,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:27,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T06:10:27,967 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-11T06:10:28,059 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-11T06:10:28,068 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-11T06:10:28,072 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T06:10:28,105 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 24018 (auto-detected) 2024-12-11T06:10:28,107 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-11T06:10:28,131 INFO [Time-limited test {}] 
ipc.NettyRpcServer(191): Bind to /172.17.0.2:39063 2024-12-11T06:10:28,155 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39063 connecting to ZooKeeper ensemble=127.0.0.1:62516 2024-12-11T06:10:28,193 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:390630x0, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T06:10:28,202 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39063-0x1007fb2ceb60000 connected 2024-12-11T06:10:28,255 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:28,259 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:28,280 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T06:10:28,285 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b, hbase.cluster.distributed=false 2024-12-11T06:10:28,327 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T06:10:28,333 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39063 2024-12-11T06:10:28,333 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39063 2024-12-11T06:10:28,336 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39063 2024-12-11T06:10:28,342 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39063 2024-12-11T06:10:28,344 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39063 2024-12-11T06:10:28,504 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b66397bb9319:0 server-side Connection retries=45 2024-12-11T06:10:28,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:28,508 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:28,508 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T06:10:28,508 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:28,509 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T06:10:28,513 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T06:10:28,517 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T06:10:28,518 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35939 2024-12-11T06:10:28,521 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35939 connecting to ZooKeeper ensemble=127.0.0.1:62516 2024-12-11T06:10:28,523 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:28,526 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:28,534 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359390x0, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T06:10:28,535 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:359390x0, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T06:10:28,537 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35939-0x1007fb2ceb60001 connected 2024-12-11T06:10:28,543 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T06:10:28,551 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-11T06:10:28,555 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T06:10:28,563 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T06:10:28,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35939 2024-12-11T06:10:28,569 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35939 2024-12-11T06:10:28,569 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35939 2024-12-11T06:10:28,574 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35939 2024-12-11T06:10:28,575 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35939 2024-12-11T06:10:28,597 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b66397bb9319:0 server-side Connection retries=45 2024-12-11T06:10:28,597 INFO [Time-limited test {}] ipc.RpcExecutor(188): 
Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:28,598 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:28,598 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T06:10:28,599 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:28,599 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T06:10:28,599 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T06:10:28,600 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T06:10:28,601 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44539 2024-12-11T06:10:28,603 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44539 connecting to ZooKeeper ensemble=127.0.0.1:62516 2024-12-11T06:10:28,604 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:28,607 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:28,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:445390x0, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T06:10:28,618 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T06:10:28,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:445390x0, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T06:10:28,619 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-11T06:10:28,619 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44539-0x1007fb2ceb60002 connected 2024-12-11T06:10:28,621 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T06:10:28,623 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T06:10:28,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44539 2024-12-11T06:10:28,629 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44539 2024-12-11T06:10:28,629 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44539 2024-12-11T06:10:28,636 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44539 2024-12-11T06:10:28,637 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44539 2024-12-11T06:10:28,664 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/b66397bb9319:0 server-side Connection retries=45 2024-12-11T06:10:28,664 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:28,664 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:28,665 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T06:10:28,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T06:10:28,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T06:10:28,665 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T06:10:28,666 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T06:10:28,672 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42579 2024-12-11T06:10:28,674 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42579 connecting to ZooKeeper ensemble=127.0.0.1:62516 2024-12-11T06:10:28,676 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:28,680 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:28,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425790x0, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T06:10:28,696 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42579-0x1007fb2ceb60003 connected 2024-12-11T06:10:28,696 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:425790x0, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 
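The repeated RpcExecutor entries for the master and each region server (default.FPBQ.Fifo, priority.RWQ.Fifo, replication.FPBQ.Fifo, metaPriority.FPBQ.Fifo with handlerCount=3 or 1) reflect the deliberately small handler pools these minicluster tests run with. As a rough illustration only (which exact keys the test harness tunes is an assumption), a configuration like the following would yield a similarly small default handler pool:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class HandlerCountSketch {
  public static Configuration lowHandlerConf() {
    // hbase.regionserver.handler.count typically drives the size of the
    // default.FPBQ.Fifo handler pool; 3 matches the handlerCount=3 logged above.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 3);
    return conf;
  }
}
```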
2024-12-11T06:10:28,697 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T06:10:28,705 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-11T06:10:28,708 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T06:10:28,713 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T06:10:28,729 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42579 2024-12-11T06:10:28,730 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42579 2024-12-11T06:10:28,732 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42579 2024-12-11T06:10:28,744 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42579 2024-12-11T06:10:28,745 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42579 2024-12-11T06:10:28,773 DEBUG [M:0;b66397bb9319:39063 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;b66397bb9319:39063 2024-12-11T06:10:28,774 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/b66397bb9319,39063,1733897427482 2024-12-11T06:10:28,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T06:10:28,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T06:10:28,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T06:10:28,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T06:10:28,791 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/b66397bb9319,39063,1733897427482 2024-12-11T06:10:28,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T06:10:28,825 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, 
quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:28,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:28,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T06:10:28,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:28,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T06:10:28,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:28,830 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T06:10:28,832 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/b66397bb9319,39063,1733897427482 from backup master directory 2024-12-11T06:10:28,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/b66397bb9319,39063,1733897427482 2024-12-11T06:10:28,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T06:10:28,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T06:10:28,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T06:10:28,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T06:10:28,838 WARN [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-11T06:10:28,839 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=b66397bb9319,39063,1733897427482 2024-12-11T06:10:28,842 INFO [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-11T06:10:28,844 INFO [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-11T06:10:28,940 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/hbase.id] with ID: 5d78b9e9-31a6-4f57-a820-448a70e091f0 2024-12-11T06:10:28,940 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.tmp/hbase.id 2024-12-11T06:10:28,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741826_1002 (size=42) 2024-12-11T06:10:28,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741826_1002 (size=42) 2024-12-11T06:10:28,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741826_1002 (size=42) 2024-12-11T06:10:28,986 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.tmp/hbase.id]:[hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/hbase.id] 2024-12-11T06:10:29,073 INFO [master/b66397bb9319:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:29,080 INFO [master/b66397bb9319:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T06:10:29,111 INFO [master/b66397bb9319:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 28ms. 
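The FSUtils entries above write the cluster ID to a temporary location and then move it to its final path. A stripped-down sketch of that write-then-rename pattern with the plain Hadoop FileSystem API; the paths and helper name are illustrative, not the ones FSUtils uses internally:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFile {
  // Write to a temporary file first, then rename, so readers never observe a partial file.
  public static void writeAtomically(FileSystem fs, Path target, String content) throws IOException {
    Path tmp = new Path(target.getParent(), ".tmp-" + target.getName());
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, target)) {
      throw new IOException("rename " + tmp + " -> " + target + " failed");
    }
  }
}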
2024-12-11T06:10:29,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:29,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:29,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:29,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:29,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741827_1003 (size=196) 2024-12-11T06:10:29,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741827_1003 (size=196) 2024-12-11T06:10:29,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741827_1003 (size=196) 2024-12-11T06:10:29,221 INFO [master/b66397bb9319:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:10:29,224 INFO [master/b66397bb9319:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T06:10:29,250 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
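The NoSuchMethodException above is the expected outcome of a capability probe: the SASL helper reflectively looks for DFSClient.decryptEncryptedDataEncryptionKey and, when the running Hadoop does not expose it, falls back to the pre-HDFS-12396 path. A generic sketch of that probe-and-fallback pattern; the class and helper names here are illustrative only:

import java.lang.reflect.Method;

public final class CapabilityProbe {
  // Returns the method if the dependency on the classpath exposes it, otherwise null,
  // so the caller can choose an alternate code path instead of failing at class load.
  public static Method probe(Class<?> owner, String name, Class<?>... paramTypes) {
    try {
      return owner.getDeclaredMethod(name, paramTypes);
    } catch (NoSuchMethodException e) {
      return null; // Older/newer dependency version: use the fallback helper.
    }
  }
}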
2024-12-11T06:10:29,257 INFO [master/b66397bb9319:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T06:10:29,298 WARN [IPC Server handler 4 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T06:10:29,298 WARN [IPC Server handler 4 on default port 42173 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T06:10:29,299 WARN [IPC Server handler 4 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T06:10:29,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741828_1004 (size=1189) 2024-12-11T06:10:29,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741828_1004 (size=1189) 2024-12-11T06:10:29,369 INFO [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/data/master/store 2024-12-11T06:10:29,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741829_1005 (size=34) 2024-12-11T06:10:29,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741829_1005 (size=34) 2024-12-11T06:10:29,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741829_1005 (size=34) 2024-12-11T06:10:29,446 INFO [master/b66397bb9319:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-11T06:10:29,451 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:29,453 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T06:10:29,453 INFO [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T06:10:29,453 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T06:10:29,455 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T06:10:29,456 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T06:10:29,456 INFO [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
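The StoreHotnessProtector entry above notes the protector is disabled and names the property that enables it. A minimal sketch of turning it on in a test configuration, using only the key quoted in that log line; the chosen limit is an arbitrary example value:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HotnessProtectorConfig {
  public static Configuration withParallelPutLimit() {
    Configuration conf = HBaseConfiguration.create();
    // Any value > 0 enables StoreHotnessProtector; 10 is illustrative.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);
    return conf;
  }
}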
2024-12-11T06:10:29,457 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733897429452Disabling compacts and flushes for region at 1733897429452Disabling writes for close at 1733897429455 (+3 ms)Writing region close event to WAL at 1733897429456 (+1 ms)Closed at 1733897429456 2024-12-11T06:10:29,460 WARN [master/b66397bb9319:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/data/master/store/.initializing 2024-12-11T06:10:29,461 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/WALs/b66397bb9319,39063,1733897427482 2024-12-11T06:10:29,473 INFO [master/b66397bb9319:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T06:10:29,497 INFO [master/b66397bb9319:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b66397bb9319%2C39063%2C1733897427482, suffix=, logDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/WALs/b66397bb9319,39063,1733897427482, archiveDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/oldWALs, maxLogs=10 2024-12-11T06:10:29,534 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/WALs/b66397bb9319,39063,1733897427482/b66397bb9319%2C39063%2C1733897427482.1733897429504, exclude list is [], retry=0 2024-12-11T06:10:29,540 WARN [IPC Server handler 1 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T06:10:29,540 WARN [IPC Server handler 1 on default port 42173 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T06:10:29,541 WARN [IPC Server handler 1 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T06:10:29,557 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:33445,DS-f5503314-a73e-487a-bfb6-009f9d2b7f11,DISK] 2024-12-11T06:10:29,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-11T06:10:29,563 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36519,DS-fbe345be-5279-421e-a264-2bb8b6b82afb,DISK] 2024-12-11T06:10:29,619 INFO [master/b66397bb9319:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/WALs/b66397bb9319,39063,1733897427482/b66397bb9319%2C39063%2C1733897427482.1733897429504 2024-12-11T06:10:29,623 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34059:34059),(127.0.0.1/127.0.0.1:37187:37187)] 2024-12-11T06:10:29,624 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T06:10:29,625 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:29,630 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,631 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,693 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,730 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T06:10:29,736 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:29,746 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T06:10:29,747 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,757 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T06:10:29,758 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:29,759 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:10:29,760 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,763 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T06:10:29,764 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:29,765 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:10:29,765 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,771 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T06:10:29,771 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:29,772 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:10:29,773 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,780 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,782 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,790 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,791 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,801 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-11T06:10:29,816 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T06:10:29,836 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:10:29,838 INFO [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60020221, jitterRate=-0.10562901198863983}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T06:10:29,851 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733897429652Initializing all the Stores at 1733897429656 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733897429656Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897429661 (+5 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897429661Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897429661Cleaning up temporary data from old regions at 1733897429791 (+130 ms)Region opened successfully at 1733897429851 (+60 ms) 2024-12-11T06:10:29,855 INFO [master/b66397bb9319:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T06:10:29,926 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1500ff7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b66397bb9319/172.17.0.2:0 2024-12-11T06:10:29,975 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
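The region open journal above repeats the master:store column family settings. A sketch of building a comparable descriptor with the public HBase client API, offered as an approximation of what the journal shows rather than the internal MasterRegionFactory code; the flush lower bound uses the property named in the FlushLargeStoresPolicy entry:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreLikeDescriptor {
  public static TableDescriptor build() {
    // Mirrors the 'info' family from the journal: 3 versions, in-memory, ROWCOL bloom,
    // ROW_INDEX_V1 block encoding, 8 KB blocks.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build())
      // Per-column-family flush lower bound (32 MB, matching the value the log falls back to).
      .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
        String.valueOf(32L * 1024 * 1024))
      .build();
  }
}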
2024-12-11T06:10:29,992 INFO [master/b66397bb9319:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T06:10:29,992 INFO [master/b66397bb9319:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T06:10:29,996 INFO [master/b66397bb9319:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T06:10:29,998 INFO [master/b66397bb9319:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-11T06:10:30,006 INFO [master/b66397bb9319:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-12-11T06:10:30,006 INFO [master/b66397bb9319:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T06:10:30,040 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-11T06:10:30,055 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T06:10:30,058 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T06:10:30,062 INFO [master/b66397bb9319:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T06:10:30,064 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T06:10:30,067 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T06:10:30,071 INFO [master/b66397bb9319:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T06:10:30,077 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T06:10:30,079 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T06:10:30,083 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T06:10:30,085 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T06:10:30,105 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T06:10:30,107 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T06:10:30,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T06:10:30,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T06:10:30,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T06:10:30,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:30,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T06:10:30,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:30,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:30,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:30,116 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=b66397bb9319,39063,1733897427482, sessionid=0x1007fb2ceb60000, setting cluster-up flag (Was=false) 2024-12-11T06:10:30,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:30,133 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:30,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:30,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-11T06:10:30,143 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T06:10:30,148 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b66397bb9319,39063,1733897427482 2024-12-11T06:10:30,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:30,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:30,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:30,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:30,169 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T06:10:30,171 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=b66397bb9319,39063,1733897427482 2024-12-11T06:10:30,178 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T06:10:30,225 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-11T06:10:30,235 INFO [master/b66397bb9319:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:10:30,235 INFO [master/b66397bb9319:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-11T06:10:30,258 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(746): ClusterId : 5d78b9e9-31a6-4f57-a820-448a70e091f0 2024-12-11T06:10:30,261 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(746): ClusterId : 5d78b9e9-31a6-4f57-a820-448a70e091f0 2024-12-11T06:10:30,262 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T06:10:30,265 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T06:10:30,275 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T06:10:30,276 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T06:10:30,278 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T06:10:30,279 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T06:10:30,280 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T06:10:30,280 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(746): ClusterId : 5d78b9e9-31a6-4f57-a820-448a70e091f0 2024-12-11T06:10:30,280 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T06:10:30,280 DEBUG [RS:1;b66397bb9319:44539 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78640cf8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b66397bb9319/172.17.0.2:0 2024-12-11T06:10:30,283 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T06:10:30,283 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T06:10:30,284 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T06:10:30,285 DEBUG [RS:0;b66397bb9319:35939 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c1ab2fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b66397bb9319/172.17.0.2:0 2024-12-11T06:10:30,286 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T06:10:30,287 DEBUG [RS:2;b66397bb9319:42579 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3af3df89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=b66397bb9319/172.17.0.2:0 2024-12-11T06:10:30,301 DEBUG [RS:1;b66397bb9319:44539 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;b66397bb9319:44539 2024-12-11T06:10:30,306 INFO [RS:1;b66397bb9319:44539 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T06:10:30,306 
INFO [RS:1;b66397bb9319:44539 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T06:10:30,306 DEBUG [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-11T06:10:30,307 DEBUG [RS:0;b66397bb9319:35939 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;b66397bb9319:35939 2024-12-11T06:10:30,307 INFO [RS:1;b66397bb9319:44539 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:10:30,307 DEBUG [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T06:10:30,307 INFO [RS:0;b66397bb9319:35939 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T06:10:30,307 INFO [RS:0;b66397bb9319:35939 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T06:10:30,307 DEBUG [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-11T06:10:30,308 INFO [RS:0;b66397bb9319:35939 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:10:30,308 DEBUG [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T06:10:30,311 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(2659): reportForDuty to master=b66397bb9319,39063,1733897427482 with port=44539, startcode=1733897428597 2024-12-11T06:10:30,311 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(2659): reportForDuty to master=b66397bb9319,39063,1733897427482 with port=35939, startcode=1733897428441 2024-12-11T06:10:30,319 DEBUG [RS:2;b66397bb9319:42579 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;b66397bb9319:42579 2024-12-11T06:10:30,319 INFO [RS:2;b66397bb9319:42579 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T06:10:30,319 INFO [RS:2;b66397bb9319:42579 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T06:10:30,319 DEBUG [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-11T06:10:30,320 INFO [RS:2;b66397bb9319:42579 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:10:30,320 DEBUG [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(832): About to register with Master. 
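The coprocessor entries above show AccessController being loaded as a system coprocessor on the master and on each region server. A sketch of the configuration that requests that, assuming the standard coprocessor class-list keys rather than whatever this particular test harness sets:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControlCoprocessors {
  public static Configuration withAccessController() {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    // System coprocessors are listed per host type; the same class serves all three here.
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    return conf;
  }
}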
2024-12-11T06:10:30,319 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T06:10:30,321 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(2659): reportForDuty to master=b66397bb9319,39063,1733897427482 with port=42579, startcode=1733897428663 2024-12-11T06:10:30,327 DEBUG [RS:2;b66397bb9319:42579 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T06:10:30,327 DEBUG [RS:0;b66397bb9319:35939 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T06:10:30,327 DEBUG [RS:1;b66397bb9319:44539 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T06:10:30,333 INFO [master/b66397bb9319:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T06:10:30,343 INFO [master/b66397bb9319:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-11T06:10:30,351 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: b66397bb9319,39063,1733897427482 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T06:10:30,361 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/b66397bb9319:0, corePoolSize=5, maxPoolSize=5 2024-12-11T06:10:30,361 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/b66397bb9319:0, corePoolSize=5, maxPoolSize=5 2024-12-11T06:10:30,361 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/b66397bb9319:0, corePoolSize=5, maxPoolSize=5 2024-12-11T06:10:30,361 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/b66397bb9319:0, corePoolSize=5, maxPoolSize=5 2024-12-11T06:10:30,362 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/b66397bb9319:0, corePoolSize=10, maxPoolSize=10 2024-12-11T06:10:30,362 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,362 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/b66397bb9319:0, corePoolSize=2, maxPoolSize=2 2024-12-11T06:10:30,362 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,386 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41029, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T06:10:30,390 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T06:10:30,390 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T06:10:30,390 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T06:10:30,390 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T06:10:30,391 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53279, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T06:10:30,396 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58789, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T06:10:30,396 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-11T06:10:30,405 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-11T06:10:30,406 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-11T06:10:30,411 INFO [master/b66397bb9319:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733897460410 2024-12-11T06:10:30,413 INFO [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T06:10:30,414 INFO [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T06:10:30,420 INFO [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T06:10:30,420 INFO [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T06:10:30,421 INFO [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T06:10:30,421 INFO [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T06:10:30,421 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T06:10:30,422 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T06:10:30,428 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-11T06:10:30,432 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:30,433 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T06:10:30,439 DEBUG [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-11T06:10:30,439 DEBUG [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-11T06:10:30,439 DEBUG [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-11T06:10:30,439 WARN [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-11T06:10:30,439 WARN [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-11T06:10:30,439 WARN [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
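The reportForDuty failures above are benign during startup: the master's RPC endpoint rejects regionserver registration with ServerNotRunningYetException until master initialization finishes, and each regionserver sleeps briefly and retries. A rough sketch of that sleep-and-retry pattern (plain Java, not the actual HRegionServer code; registerWithMaster() is a hypothetical stand-in):

    // Sketch of the retry loop implied by the log; registerWithMaster() is hypothetical.
    public final class ReportForDutyRetry {
      interface Registration { boolean registerWithMaster() throws Exception; }

      static void reportForDuty(Registration reg, long sleepMs, int maxAttempts) throws Exception {
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          try {
            if (reg.registerWithMaster()) {
              return; // master accepted the registration
            }
          } catch (Exception e) {
            // e.g. ServerNotRunningYetException wrapped in an RPC exception
          }
          // Matches the "sleeping 100 ms and then retrying" behaviour seen above.
          Thread.sleep(sleepMs);
        }
        throw new IllegalStateException("gave up after " + maxAttempts + " attempts");
      }
    }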
2024-12-11T06:10:30,440 INFO [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T06:10:30,441 INFO [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T06:10:30,441 INFO [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T06:10:30,444 INFO [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T06:10:30,444 WARN [IPC Server handler 4 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T06:10:30,444 WARN [IPC Server handler 4 on default port 42173 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T06:10:30,444 WARN [IPC Server handler 4 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T06:10:30,444 INFO [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T06:10:30,448 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/b66397bb9319:0:becomeActiveMaster-HFileCleaner.large.0-1733897430445,5,FailOnTimeoutGroup] 2024-12-11T06:10:30,448 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/b66397bb9319:0:becomeActiveMaster-HFileCleaner.small.0-1733897430448,5,FailOnTimeoutGroup] 2024-12-11T06:10:30,449 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,449 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T06:10:30,450 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
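The BlockPlacementPolicyDefault warnings above come from HDFS trying to place 3 replicas while the mini DFS cluster does not yet have 3 usable DISK storages; they typically clear once all datanodes are registered. In a test setup one common mitigation is to lower dfs.replication to match the number of datanodes; a hedged sketch (the chosen value is an assumption):

    // Illustrative: lower the default replication factor for a small test cluster.
    // "dfs.replication" is the standard HDFS key; the helper itself is hypothetical.
    import org.apache.hadoop.conf.Configuration;

    public class TestDfsConfig {
      public static Configuration create(int dataNodes) {
        Configuration conf = new Configuration();
        conf.setInt("dfs.replication", Math.min(3, dataNodes)); // never ask for more replicas than nodes
        return conf;
      }
    }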
2024-12-11T06:10:30,450 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741831_1007 (size=1321) 2024-12-11T06:10:30,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741831_1007 (size=1321) 2024-12-11T06:10:30,461 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T06:10:30,461 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:10:30,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741832_1008 (size=32) 2024-12-11T06:10:30,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741832_1008 (size=32) 2024-12-11T06:10:30,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741832_1008 (size=32) 2024-12-11T06:10:30,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:30,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T06:10:30,514 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T06:10:30,514 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:30,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T06:10:30,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T06:10:30,520 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T06:10:30,520 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:30,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T06:10:30,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T06:10:30,524 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, 
major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T06:10:30,524 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:30,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T06:10:30,526 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T06:10:30,529 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T06:10:30,529 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:30,530 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T06:10:30,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T06:10:30,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740 2024-12-11T06:10:30,533 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740 2024-12-11T06:10:30,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T06:10:30,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T06:10:30,537 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
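The hbase:meta descriptor logged above (column families info, ns, rep_barrier and table, each with ROWCOL bloom filters, ROW_INDEX_V1 encoding and IN_MEMORY=true) is built internally by the master, but an equivalent descriptor for an ordinary table can be expressed with the public builder API. A sketch for a single 'info'-like family, with attribute values copied from the log and a hypothetical table name:

    // Sketch using the public descriptor builders; mirrors the 'info' family attributes above.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptor {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example")) // hypothetical table
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBloomFilterType(BloomType.ROWCOL)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBlocksize(8 * 1024) // 8 KB, as logged for the meta families
                .build())
            .build();
      }
    }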
2024-12-11T06:10:30,540 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T06:10:30,541 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(2659): reportForDuty to master=b66397bb9319,39063,1733897427482 with port=35939, startcode=1733897428441 2024-12-11T06:10:30,541 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(2659): reportForDuty to master=b66397bb9319,39063,1733897427482 with port=44539, startcode=1733897428597 2024-12-11T06:10:30,542 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(2659): reportForDuty to master=b66397bb9319,39063,1733897427482 with port=42579, startcode=1733897428663 2024-12-11T06:10:30,544 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b66397bb9319,44539,1733897428597 2024-12-11T06:10:30,547 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] master.ServerManager(517): Registering regionserver=b66397bb9319,44539,1733897428597 2024-12-11T06:10:30,553 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:10:30,554 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72586102, jitterRate=0.08161720633506775}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T06:10:30,557 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b66397bb9319,42579,1733897428663 2024-12-11T06:10:30,557 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] master.ServerManager(517): Registering regionserver=b66397bb9319,42579,1733897428663 2024-12-11T06:10:30,557 DEBUG [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:10:30,557 DEBUG [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42173 2024-12-11T06:10:30,557 DEBUG [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T06:10:30,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733897430508Initializing all the Stores at 1733897430510 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733897430511 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733897430511Instantiating store for column family {NAME => 
'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897430511Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733897430511Cleaning up temporary data from old regions at 1733897430536 (+25 ms)Region opened successfully at 1733897430559 (+23 ms) 2024-12-11T06:10:30,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T06:10:30,559 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T06:10:30,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T06:10:30,559 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T06:10:30,560 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T06:10:30,561 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T06:10:30,561 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733897430559Disabling compacts and flushes for region at 1733897430559Disabling writes for close at 1733897430559Writing region close event to WAL at 1733897430561 (+2 ms)Closed at 1733897430561 2024-12-11T06:10:30,561 DEBUG [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:10:30,561 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] master.ServerManager(363): Checking decommissioned status of RegionServer b66397bb9319,35939,1733897428441 2024-12-11T06:10:30,562 DEBUG [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42173 2024-12-11T06:10:30,562 DEBUG [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T06:10:30,562 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] master.ServerManager(517): Registering regionserver=b66397bb9319,35939,1733897428441 2024-12-11T06:10:30,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T06:10:30,566 DEBUG [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:10:30,566 DEBUG [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42173 2024-12-11T06:10:30,566 DEBUG [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.master.info.port=-1 2024-12-11T06:10:30,568 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T06:10:30,568 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T06:10:30,569 DEBUG [RS:1;b66397bb9319:44539 {}] zookeeper.ZKUtil(111): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b66397bb9319,44539,1733897428597 2024-12-11T06:10:30,569 WARN [RS:1;b66397bb9319:44539 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T06:10:30,570 INFO [RS:1;b66397bb9319:44539 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T06:10:30,570 DEBUG [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,44539,1733897428597 2024-12-11T06:10:30,571 DEBUG [RS:0;b66397bb9319:35939 {}] zookeeper.ZKUtil(111): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b66397bb9319,35939,1733897428441 2024-12-11T06:10:30,571 DEBUG [RS:2;b66397bb9319:42579 {}] zookeeper.ZKUtil(111): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/b66397bb9319,42579,1733897428663 2024-12-11T06:10:30,571 WARN [RS:0;b66397bb9319:35939 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T06:10:30,571 WARN [RS:2;b66397bb9319:42579 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
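WALFactory above instantiates AsyncFSWALProvider, which is selected via the hbase.wal.provider property. A minimal sketch of choosing the provider explicitly (the key and the 'asyncfs'/'filesystem' values are the standard ones; the surrounding helper is illustrative):

    // Illustrative: pick the WAL provider by configuration before starting the cluster.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfig {
      public static Configuration asyncFs() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.wal.provider", "asyncfs");      // AsyncFSWALProvider, as seen in the log
        // conf.set("hbase.wal.provider", "filesystem"); // classic FSHLog-based provider
        return conf;
      }
    }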
2024-12-11T06:10:30,571 INFO [RS:0;b66397bb9319:35939 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T06:10:30,571 INFO [RS:2;b66397bb9319:42579 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T06:10:30,571 DEBUG [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,35939,1733897428441 2024-12-11T06:10:30,571 DEBUG [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,42579,1733897428663 2024-12-11T06:10:30,577 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T06:10:30,579 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b66397bb9319,35939,1733897428441] 2024-12-11T06:10:30,579 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b66397bb9319,42579,1733897428663] 2024-12-11T06:10:30,579 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [b66397bb9319,44539,1733897428597] 2024-12-11T06:10:30,589 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T06:10:30,594 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T06:10:30,613 INFO [RS:0;b66397bb9319:35939 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T06:10:30,613 INFO [RS:2;b66397bb9319:42579 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T06:10:30,613 INFO [RS:1;b66397bb9319:44539 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T06:10:30,630 INFO [RS:0;b66397bb9319:35939 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T06:10:30,636 INFO [RS:1;b66397bb9319:44539 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T06:10:30,637 INFO [RS:2;b66397bb9319:42579 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T06:10:30,642 INFO [RS:1;b66397bb9319:44539 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T06:10:30,642 INFO [RS:0;b66397bb9319:35939 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, 
higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T06:10:30,642 INFO [RS:2;b66397bb9319:42579 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T06:10:30,642 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,642 INFO [RS:0;b66397bb9319:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,642 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,644 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T06:10:30,644 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T06:10:30,645 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T06:10:30,650 INFO [RS:1;b66397bb9319:44539 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T06:10:30,650 INFO [RS:2;b66397bb9319:42579 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T06:10:30,650 INFO [RS:0;b66397bb9319:35939 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T06:10:30,652 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,652 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,652 INFO [RS:0;b66397bb9319:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
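The PressureAwareCompactionThroughputController lines above report the default bounds (higher 100 MB/s, lower 50 MB/s, 60 s tuning period). These appear to correspond to the hbase.hstore.compaction.throughput.* keys; a hedged sketch of overriding them (the two key names are my best recollection of the standard ones, and the values are arbitrary):

    // Hedged sketch: raise the compaction throughput bounds for an I/O-rich test host.
    // The two property names below are assumptions about the throughput-controller keys.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputConfig {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 200L * 1024 * 1024); // 200 MB/s
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 100L * 1024 * 1024);  // 100 MB/s
        return conf;
      }
    }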
2024-12-11T06:10:30,652 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,652 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,652 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b66397bb9319:0, corePoolSize=2, maxPoolSize=2 2024-12-11T06:10:30,653 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,653 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b66397bb9319:0, corePoolSize=2, maxPoolSize=2 
2024-12-11T06:10:30,654 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/b66397bb9319:0, corePoolSize=2, maxPoolSize=2 2024-12-11T06:10:30,654 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0, corePoolSize=3, maxPoolSize=3 2024-12-11T06:10:30,654 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:2;b66397bb9319:42579 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b66397bb9319:0, corePoolSize=3, maxPoolSize=3 2024-12-11T06:10:30,654 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,654 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): 
Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,655 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,655 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/b66397bb9319:0, corePoolSize=1, maxPoolSize=1 2024-12-11T06:10:30,655 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0, corePoolSize=3, maxPoolSize=3 2024-12-11T06:10:30,655 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0, corePoolSize=3, maxPoolSize=3 2024-12-11T06:10:30,655 DEBUG [RS:1;b66397bb9319:44539 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b66397bb9319:0, corePoolSize=3, maxPoolSize=3 2024-12-11T06:10:30,655 DEBUG [RS:0;b66397bb9319:35939 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/b66397bb9319:0, corePoolSize=3, maxPoolSize=3 2024-12-11T06:10:30,664 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:0;b66397bb9319:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:0;b66397bb9319:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:0;b66397bb9319:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:0;b66397bb9319:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
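The executor.ExecutorService entries above (RS_OPEN_REGION, RS_CLOSE_META, RS_LOG_REPLAY_OPS, ...) each report a fixed pool where corePoolSize equals maxPoolSize. The same shape can be illustrated with a plain JDK ThreadPoolExecutor; this shows only the pattern, not HBase's own executor implementation:

    // Pattern illustration only: a fixed-size, named executor like the RS_* services above.
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class FixedNamedExecutor {
      public static ThreadPoolExecutor create(String name, int poolSize) {
        AtomicInteger seq = new AtomicInteger();
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            poolSize, poolSize,                       // corePoolSize == maxPoolSize, as in the log
            60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(),              // unbounded work queue
            r -> new Thread(r, name + "-" + seq.incrementAndGet()));
        pool.allowCoreThreadTimeOut(true);            // let idle core threads exit
        return pool;
      }
    }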
2024-12-11T06:10:30,664 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,664 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,665 INFO [RS:0;b66397bb9319:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,665 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,44539,1733897428597-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T06:10:30,665 INFO [RS:0;b66397bb9319:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,35939,1733897428441-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T06:10:30,665 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,42579,1733897428663-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T06:10:30,702 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T06:10:30,702 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T06:10:30,702 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T06:10:30,705 INFO [RS:0;b66397bb9319:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,35939,1733897428441-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,705 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,42579,1733897428663-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,705 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,44539,1733897428597-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,706 INFO [RS:0;b66397bb9319:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,706 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,706 INFO [RS:0;b66397bb9319:35939 {}] regionserver.Replication(171): b66397bb9319,35939,1733897428441 started 2024-12-11T06:10:30,706 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,706 INFO [RS:2;b66397bb9319:42579 {}] regionserver.Replication(171): b66397bb9319,42579,1733897428663 started 2024-12-11T06:10:30,706 INFO [RS:1;b66397bb9319:44539 {}] regionserver.Replication(171): b66397bb9319,44539,1733897428597 started 2024-12-11T06:10:30,734 INFO [RS:0;b66397bb9319:35939 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
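Each "Chore ScheduledChore name=..., period=..., unit=..." line above corresponds to a ScheduledChore registered with a ChoreService. A minimal sketch of that mechanism (the chore body and names are hypothetical, and since ScheduledChore/ChoreService are HBase-internal classes the constructor signatures here should be treated as approximate):

    // Hedged sketch: a periodic chore in the style of the entries above.
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ExampleChore extends ScheduledChore {
      ExampleChore(Stoppable stopper) {
        super("ExampleChore", stopper, 60_000); // name, stopper, period in ms (assumed signature)
      }

      @Override
      protected void chore() {
        // periodic work would go here
      }

      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example"); // thread-name prefix (assumed)
        service.scheduleChore(new ExampleChore(stopper));
      }
    }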
2024-12-11T06:10:30,735 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(1482): Serving as b66397bb9319,35939,1733897428441, RpcServer on b66397bb9319/172.17.0.2:35939, sessionid=0x1007fb2ceb60001 2024-12-11T06:10:30,736 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T06:10:30,736 DEBUG [RS:0;b66397bb9319:35939 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b66397bb9319,35939,1733897428441 2024-12-11T06:10:30,736 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b66397bb9319,35939,1733897428441' 2024-12-11T06:10:30,736 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T06:10:30,738 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:30,738 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(1482): Serving as b66397bb9319,42579,1733897428663, RpcServer on b66397bb9319/172.17.0.2:42579, sessionid=0x1007fb2ceb60003 2024-12-11T06:10:30,738 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T06:10:30,738 DEBUG [RS:2;b66397bb9319:42579 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b66397bb9319,42579,1733897428663 2024-12-11T06:10:30,738 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b66397bb9319,42579,1733897428663' 2024-12-11T06:10:30,739 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T06:10:30,743 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T06:10:30,745 WARN [b66397bb9319:39063 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-11T06:10:30,747 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T06:10:30,748 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T06:10:30,748 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T06:10:30,748 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T06:10:30,748 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T06:10:30,748 DEBUG [RS:0;b66397bb9319:35939 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b66397bb9319,35939,1733897428441 2024-12-11T06:10:30,748 DEBUG [RS:2;b66397bb9319:42579 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b66397bb9319,42579,1733897428663 2024-12-11T06:10:30,748 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b66397bb9319,42579,1733897428663' 2024-12-11T06:10:30,748 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b66397bb9319,35939,1733897428441' 2024-12-11T06:10:30,748 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T06:10:30,748 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T06:10:30,750 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T06:10:30,751 DEBUG [RS:2;b66397bb9319:42579 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T06:10:30,751 INFO [RS:2;b66397bb9319:42579 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T06:10:30,752 INFO [RS:2;b66397bb9319:42579 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T06:10:30,752 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T06:10:30,753 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(1482): Serving as b66397bb9319,44539,1733897428597, RpcServer on b66397bb9319/172.17.0.2:44539, sessionid=0x1007fb2ceb60002 2024-12-11T06:10:30,753 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T06:10:30,753 DEBUG [RS:1;b66397bb9319:44539 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager b66397bb9319,44539,1733897428597 2024-12-11T06:10:30,753 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b66397bb9319,44539,1733897428597' 2024-12-11T06:10:30,753 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T06:10:30,756 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T06:10:30,756 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T06:10:30,757 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T06:10:30,757 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T06:10:30,757 DEBUG [RS:1;b66397bb9319:44539 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager b66397bb9319,44539,1733897428597 2024-12-11T06:10:30,757 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'b66397bb9319,44539,1733897428597' 2024-12-11T06:10:30,757 DEBUG [RS:0;b66397bb9319:35939 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T06:10:30,757 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T06:10:30,757 INFO [RS:0;b66397bb9319:35939 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T06:10:30,757 INFO [RS:0;b66397bb9319:35939 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T06:10:30,758 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T06:10:30,758 DEBUG [RS:1;b66397bb9319:44539 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T06:10:30,758 INFO [RS:1;b66397bb9319:44539 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T06:10:30,758 INFO [RS:1;b66397bb9319:44539 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
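
All three region servers report "Quota support disabled", so neither the RPC quota manager nor the space quota manager starts. A hedged sketch of how that switch is flipped: setting "hbase.quota.enabled" before the (mini) cluster starts enables both managers.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: quotas are off by default, which is what the
// RegionServerRpcQuotaManager / RegionServerSpaceQuotaManager lines above show.
public class EnableQuotas {
  public static Configuration quotaEnabledConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.quota.enabled", true);
    return conf;
  }
}
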
2024-12-11T06:10:30,857 INFO [RS:2;b66397bb9319:42579 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T06:10:30,858 INFO [RS:0;b66397bb9319:35939 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T06:10:30,859 INFO [RS:1;b66397bb9319:44539 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T06:10:30,861 INFO [RS:0;b66397bb9319:35939 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b66397bb9319%2C35939%2C1733897428441, suffix=, logDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,35939,1733897428441, archiveDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/oldWALs, maxLogs=32 2024-12-11T06:10:30,861 INFO [RS:2;b66397bb9319:42579 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b66397bb9319%2C42579%2C1733897428663, suffix=, logDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,42579,1733897428663, archiveDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/oldWALs, maxLogs=32 2024-12-11T06:10:30,862 INFO [RS:1;b66397bb9319:44539 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b66397bb9319%2C44539%2C1733897428597, suffix=, logDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,44539,1733897428597, archiveDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/oldWALs, maxLogs=32 2024-12-11T06:10:30,880 DEBUG [RS:0;b66397bb9319:35939 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,35939,1733897428441/b66397bb9319%2C35939%2C1733897428441.1733897430866, exclude list is [], retry=0 2024-12-11T06:10:30,882 WARN [IPC Server handler 2 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T06:10:30,883 WARN [IPC Server handler 2 on default port 42173 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T06:10:30,883 WARN [IPC Server handler 2 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T06:10:30,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33445,DS-f5503314-a73e-487a-bfb6-009f9d2b7f11,DISK] 2024-12-11T06:10:30,885 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36519,DS-fbe345be-5279-421e-a264-2bb8b6b82afb,DISK] 2024-12-11T06:10:30,886 DEBUG [RS:1;b66397bb9319:44539 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,44539,1733897428597/b66397bb9319%2C44539%2C1733897428597.1733897430866, exclude list is [], retry=0 2024-12-11T06:10:30,887 DEBUG [RS:2;b66397bb9319:42579 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,42579,1733897428663/b66397bb9319%2C42579%2C1733897428663.1733897430866, exclude list is [], retry=0 2024-12-11T06:10:30,889 WARN [IPC Server handler 2 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T06:10:30,889 WARN [IPC Server handler 2 on default port 42173 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T06:10:30,889 WARN [IPC Server handler 2 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T06:10:30,889 WARN [IPC Server handler 0 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-11T06:10:30,889 WARN [IPC Server handler 0 on default port 42173 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-11T06:10:30,889 WARN [IPC Server 
handler 0 on default port 42173 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-11T06:10:30,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36519,DS-fbe345be-5279-421e-a264-2bb8b6b82afb,DISK] 2024-12-11T06:10:30,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33445,DS-f5503314-a73e-487a-bfb6-009f9d2b7f11,DISK] 2024-12-11T06:10:30,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33445,DS-f5503314-a73e-487a-bfb6-009f9d2b7f11,DISK] 2024-12-11T06:10:30,892 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36519,DS-fbe345be-5279-421e-a264-2bb8b6b82afb,DISK] 2024-12-11T06:10:30,906 INFO [RS:0;b66397bb9319:35939 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,35939,1733897428441/b66397bb9319%2C35939%2C1733897428441.1733897430866 2024-12-11T06:10:30,906 INFO [RS:1;b66397bb9319:44539 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,44539,1733897428597/b66397bb9319%2C44539%2C1733897428597.1733897430866 2024-12-11T06:10:30,907 DEBUG [RS:0;b66397bb9319:35939 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37187:37187),(127.0.0.1/127.0.0.1:34059:34059)] 2024-12-11T06:10:30,907 DEBUG [RS:1;b66397bb9319:44539 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37187:37187),(127.0.0.1/127.0.0.1:34059:34059)] 2024-12-11T06:10:30,908 INFO [RS:2;b66397bb9319:42579 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,42579,1733897428663/b66397bb9319%2C42579%2C1733897428663.1733897430866 2024-12-11T06:10:30,909 DEBUG [RS:2;b66397bb9319:42579 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34059:34059),(127.0.0.1/127.0.0.1:37187:37187)] 2024-12-11T06:10:30,998 DEBUG [b66397bb9319:39063 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T06:10:31,008 DEBUG [b66397bb9319:39063 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:10:31,015 DEBUG [b66397bb9319:39063 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:10:31,015 DEBUG 
[b66397bb9319:39063 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:10:31,015 DEBUG [b66397bb9319:39063 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:10:31,015 DEBUG [b66397bb9319:39063 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:10:31,015 DEBUG [b66397bb9319:39063 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:10:31,015 DEBUG [b66397bb9319:39063 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:10:31,015 INFO [b66397bb9319:39063 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:10:31,015 INFO [b66397bb9319:39063 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:10:31,015 INFO [b66397bb9319:39063 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:10:31,015 DEBUG [b66397bb9319:39063 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:10:31,026 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:10:31,035 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b66397bb9319,35939,1733897428441, state=OPENING 2024-12-11T06:10:31,040 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T06:10:31,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:31,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:31,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:31,043 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:31,044 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T06:10:31,044 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T06:10:31,044 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T06:10:31,044 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T06:10:31,046 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T06:10:31,049 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:10:31,229 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T06:10:31,233 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50349, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T06:10:31,254 INFO [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T06:10:31,254 INFO [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T06:10:31,255 INFO [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-11T06:10:31,260 INFO [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=b66397bb9319%2C35939%2C1733897428441.meta, suffix=.meta, logDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,35939,1733897428441, archiveDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/oldWALs, maxLogs=32 2024-12-11T06:10:31,301 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,35939,1733897428441/b66397bb9319%2C35939%2C1733897428441.meta.1733897431269.meta, exclude list is [], retry=0 2024-12-11T06:10:31,307 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37875,DS-19137002-3163-45ba-9dc1-e7f6de3d535e,DISK] 2024-12-11T06:10:31,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33445,DS-f5503314-a73e-487a-bfb6-009f9d2b7f11,DISK] 2024-12-11T06:10:31,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36519,DS-fbe345be-5279-421e-a264-2bb8b6b82afb,DISK] 2024-12-11T06:10:31,326 INFO [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/WALs/b66397bb9319,35939,1733897428441/b66397bb9319%2C35939%2C1733897428441.meta.1733897431269.meta 2024-12-11T06:10:31,328 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:45813:45813),(127.0.0.1/127.0.0.1:34059:34059),(127.0.0.1/127.0.0.1:37187:37187)] 2024-12-11T06:10:31,328 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T06:10:31,330 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-11T06:10:31,331 INFO [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:10:31,332 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T06:10:31,335 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T06:10:31,337 INFO [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-11T06:10:31,350 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T06:10:31,351 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:31,351 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T06:10:31,351 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T06:10:31,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T06:10:31,369 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T06:10:31,369 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:31,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T06:10:31,371 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T06:10:31,372 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T06:10:31,372 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:31,373 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T06:10:31,374 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T06:10:31,375 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T06:10:31,376 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:31,376 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, 
compression=NONE 2024-12-11T06:10:31,377 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T06:10:31,379 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T06:10:31,379 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:31,380 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T06:10:31,380 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T06:10:31,382 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740 2024-12-11T06:10:31,386 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740 2024-12-11T06:10:31,392 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T06:10:31,392 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T06:10:31,393 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
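
The CompactionConfiguration and FlushLargeStoresPolicy lines above are dumps of effective settings (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB, and a 32 MB per-family flush fallback). A hedged sketch of the configuration keys that feed those numbers; the values are simply the defaults this run is using, not recommendations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: keys behind the numbers CompactionConfiguration prints for each store above.
public class CompactionTuning {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);              // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);             // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);       // selection ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    // The per-family flush lower bound, per the log message, is read from the table
    // descriptor (hbase.hregion.percolumnfamilyflush.size.lower.bound); when unset,
    // FlushLargeStoresPolicy falls back to memstore-flush-size / number-of-families.
    return conf;
  }
}
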
2024-12-11T06:10:31,397 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T06:10:31,399 INFO [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70528901, jitterRate=0.05096252262592316}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T06:10:31,400 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T06:10:31,404 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733897431353Writing region info on filesystem at 1733897431353Initializing all the Stores at 1733897431361 (+8 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733897431361Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733897431362 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897431362Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733897431362Cleaning up temporary data from old regions at 1733897431392 (+30 ms)Running coprocessor post-open hooks at 1733897431400 (+8 ms)Region opened successfully at 1733897431404 (+4 ms) 2024-12-11T06:10:31,415 INFO [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733897431220 2024-12-11T06:10:31,435 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:10:31,436 DEBUG [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T06:10:31,437 INFO [RS_OPEN_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T06:10:31,438 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as b66397bb9319,35939,1733897428441, state=OPEN 2024-12-11T06:10:31,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T06:10:31,448 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T06:10:31,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T06:10:31,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T06:10:31,449 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T06:10:31,449 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T06:10:31,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T06:10:31,449 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T06:10:31,451 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=b66397bb9319,35939,1733897428441 2024-12-11T06:10:31,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T06:10:31,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=b66397bb9319,35939,1733897428441 in 402 msec 2024-12-11T06:10:31,473 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T06:10:31,473 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 887 msec 2024-12-11T06:10:31,476 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T06:10:31,476 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T06:10:31,517 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:10:31,519 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:10:31,587 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:31,617 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38969, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:10:31,705 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4550 sec 2024-12-11T06:10:31,706 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733897431705, completionTime=-1 2024-12-11T06:10:31,710 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T06:10:31,710 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-11T06:10:31,825 INFO [master/b66397bb9319:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T06:10:31,825 INFO [master/b66397bb9319:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733897491825 2024-12-11T06:10:31,825 INFO [master/b66397bb9319:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733897551825 2024-12-11T06:10:31,825 INFO [master/b66397bb9319:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 115 msec 2024-12-11T06:10:31,827 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:10:31,848 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,39063,1733897427482-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:31,848 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,39063,1733897427482-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:31,849 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,39063,1733897427482-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:31,851 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-b66397bb9319:39063, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:31,851 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T06:10:31,853 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
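
The "Start fetching meta region location from registry" entries, together with the watches on /hbase/meta-region-server, are the standard hbase:meta lookup every client performs. A minimal client-side sketch of the same lookup through the public API, assuming an hbase-site.xml for this cluster is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

// Sketch: fetch the hbase:meta location the way the registry-based lookup above does.
public class MetaLocationLookup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      // Corresponds to the "[region=hbase:meta,,1.1588230740, hostname=..., seqNum=-1]" entry.
      System.out.println(loc);
    }
  }
}
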
2024-12-11T06:10:31,860 DEBUG [master/b66397bb9319:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T06:10:31,905 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.065sec 2024-12-11T06:10:31,911 INFO [master/b66397bb9319:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T06:10:31,913 INFO [master/b66397bb9319:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T06:10:31,914 INFO [master/b66397bb9319:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T06:10:31,914 INFO [master/b66397bb9319:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-11T06:10:31,915 INFO [master/b66397bb9319:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T06:10:31,916 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,39063,1733897427482-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T06:10:31,916 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,39063,1733897427482-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T06:10:31,953 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T06:10:31,953 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is b66397bb9319,39063,1733897427482 2024-12-11T06:10:31,957 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@349ef17d 2024-12-11T06:10:31,958 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T06:10:31,961 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48549, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T06:10:31,967 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T06:10:31,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-11T06:10:31,983 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:10:31,984 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:31,985 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-11T06:10:31,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T06:10:31,993 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:10:32,012 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d4584, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:32,021 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-11T06:10:32,021 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-11T06:10:32,028 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:10:32,032 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:10:32,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741837_1013 (size=349) 2024-12-11T06:10:32,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741837_1013 (size=349) 2024-12-11T06:10:32,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741837_1013 (size=349) 2024-12-11T06:10:32,049 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:10:32,051 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:10:32,052 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:10:32,052 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44154919, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:32,052 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:10:32,056 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:10:32,057 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0c6631449afb0062d10d83de4eb96abd, NAME => 'hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd.', STARTKEY => 
'', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:10:32,060 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:32,062 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34290, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:10:32,065 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54e9c55a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:32,066 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:10:32,076 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:10:32,076 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:32,081 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45874, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:10:32,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741838_1014 (size=36) 2024-12-11T06:10:32,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741838_1014 (size=36) 2024-12-11T06:10:32,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741838_1014 (size=36) 2024-12-11T06:10:32,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=b66397bb9319,39063,1733897427482 2024-12-11T06:10:32,087 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:32,087 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-12-11T06:10:32,087 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 0c6631449afb0062d10d83de4eb96abd, disabling compactions & flushes 2024-12-11T06:10:32,087 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 
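
"Minicluster is up" and "Starting mini mapreduce cluster..." come from the test scaffolding (HBaseTestingUtil in this 4.0.0-alpha-1-SNAPSHOT build). A rough sketch of the equivalent setup with the long-standing HBaseTestingUtility class; the method names are the classic ones and may differ slightly in the newer utility.

import org.apache.hadoop.hbase.HBaseTestingUtility;

// Sketch of the scaffolding behind "Minicluster is up": one master plus three
// region servers, as in this log.
public class MiniClusterScaffold {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(3);
    try {
      // The log then layers a mini MapReduce cluster on top
      // (startMiniMapReduceCluster() in the classic utility) and runs the test body.
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
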
2024-12-11T06:10:32,088 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:10:32,088 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/test.cache.data in system properties and HBase conf 2024-12-11T06:10:32,088 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. after waiting 0 ms 2024-12-11T06:10:32,088 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:10:32,088 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T06:10:32,088 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:10:32,088 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0c6631449afb0062d10d83de4eb96abd: Waiting for close lock at 1733897432087Disabling compacts and flushes for region at 1733897432087Disabling writes for close at 1733897432088 (+1 ms)Writing region close event to WAL at 1733897432088Closed at 1733897432088 2024-12-11T06:10:32,088 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir in system properties and HBase conf 2024-12-11T06:10:32,088 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T06:10:32,088 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T06:10:32,088 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T06:10:32,089 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T06:10:32,089 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T06:10:32,089 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T06:10:32,090 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T06:10:32,090 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T06:10:32,090 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T06:10:32,090 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T06:10:32,090 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T06:10:32,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T06:10:32,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/nfs.dump.dir in system properties and HBase conf 2024-12-11T06:10:32,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/java.io.tmpdir in system properties and HBase conf 2024-12-11T06:10:32,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T06:10:32,091 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T06:10:32,091 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T06:10:32,097 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:10:32,108 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733897432098"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897432098"}]},"ts":"1733897432098"} 2024-12-11T06:10:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T06:10:32,122 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-11T06:10:32,125 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:10:32,130 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897432125"}]},"ts":"1733897432125"} 2024-12-11T06:10:32,139 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-11T06:10:32,140 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:10:32,148 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:10:32,148 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:10:32,148 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:10:32,149 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:10:32,149 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:10:32,149 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:10:32,149 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:10:32,149 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:10:32,149 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:10:32,149 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:10:32,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=0c6631449afb0062d10d83de4eb96abd, ASSIGN}] 2024-12-11T06:10:32,179 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=0c6631449afb0062d10d83de4eb96abd, ASSIGN 2024-12-11T06:10:32,182 INFO [PEWorker-5 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=0c6631449afb0062d10d83de4eb96abd, ASSIGN; state=OFFLINE, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:10:32,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741839_1015 (size=592039) 2024-12-11T06:10:32,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741839_1015 (size=592039) 2024-12-11T06:10:32,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741839_1015 (size=592039) 2024-12-11T06:10:32,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741840_1016 (size=1663647) 2024-12-11T06:10:32,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741840_1016 (size=1663647) 2024-12-11T06:10:32,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741840_1016 (size=1663647) 2024-12-11T06:10:32,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T06:10:32,336 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T06:10:32,337 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0c6631449afb0062d10d83de4eb96abd, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:10:32,343 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=0c6631449afb0062d10d83de4eb96abd, ASSIGN because future has completed 2024-12-11T06:10:32,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0c6631449afb0062d10d83de4eb96abd, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:10:32,636 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:10:32,636 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0c6631449afb0062d10d83de4eb96abd, NAME => 'hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd.', STARTKEY => '', ENDKEY => ''} 2024-12-11T06:10:32,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T06:10:32,639 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 
service=AccessControlService 2024-12-11T06:10:32,639 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:10:32,640 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:10:32,640 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:32,640 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:10:32,640 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:10:32,652 INFO [StoreOpener-0c6631449afb0062d10d83de4eb96abd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:10:32,656 INFO [StoreOpener-0c6631449afb0062d10d83de4eb96abd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c6631449afb0062d10d83de4eb96abd columnFamilyName l 2024-12-11T06:10:32,656 DEBUG [StoreOpener-0c6631449afb0062d10d83de4eb96abd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:32,657 INFO [StoreOpener-0c6631449afb0062d10d83de4eb96abd-1 {}] regionserver.HStore(327): Store=0c6631449afb0062d10d83de4eb96abd/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:10:32,658 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:10:32,660 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:10:32,661 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:10:32,662 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:10:32,662 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:10:32,667 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:10:32,672 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:10:32,673 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 0c6631449afb0062d10d83de4eb96abd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72361332, jitterRate=0.07826787233352661}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:10:32,673 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:10:32,676 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0c6631449afb0062d10d83de4eb96abd: Running coprocessor pre-open hook at 1733897432640Writing region info on filesystem at 1733897432641 (+1 ms)Initializing all the Stores at 1733897432650 (+9 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733897432650Cleaning up temporary data from old regions at 1733897432662 (+12 ms)Running coprocessor post-open hooks at 1733897432674 (+12 ms)Region opened successfully at 1733897432676 (+2 ms) 2024-12-11T06:10:32,678 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., pid=6, masterSystemTime=1733897432572 2024-12-11T06:10:32,683 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:10:32,683 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:10:32,684 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0c6631449afb0062d10d83de4eb96abd, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:10:32,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0c6631449afb0062d10d83de4eb96abd, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:10:32,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T06:10:32,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0c6631449afb0062d10d83de4eb96abd, server=b66397bb9319,35939,1733897428441 in 347 msec 2024-12-11T06:10:32,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T06:10:32,707 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=0c6631449afb0062d10d83de4eb96abd, ASSIGN in 533 msec 2024-12-11T06:10:32,709 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:10:32,709 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897432709"}]},"ts":"1733897432709"} 2024-12-11T06:10:32,713 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-11T06:10:32,715 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:10:32,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 746 msec 2024-12-11T06:10:33,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T06:10:33,149 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-11T06:10:33,170 DEBUG [master/b66397bb9319:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T06:10:33,172 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T06:10:33,172 INFO [master/b66397bb9319:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=b66397bb9319,39063,1733897427482-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
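
The stretch of log above records the usual secure mini-cluster test setup: the AccessController system coprocessor is loaded, the hbase:acl system table is created by pid=4, HBaseTestingUtil points every Hadoop/YARN directory property at the per-test data directory, and a mini MapReduce cluster is brought up next (the Jetty and YARN startup below). A minimal sketch of that setup follows; it assumes the standard HBaseTestingUtil entry points (startMiniCluster, startMiniMapReduceCluster and their shutdown counterparts) and the stock coprocessor configuration keys, and it is illustrative rather than the test's own source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class SecureMiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    // Register the AccessController on master, region server and regions,
    // matching the "System coprocessor ... AccessController loaded" lines in the log.
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.setBoolean("hbase.security.authorization", true);
    // Three region servers, as reflected by "server 0/1/2" in the balancer output.
    util.startMiniCluster(3);
    // Brings up the YARN ResourceManager/NodeManager and JobHistory Jetty servers
    // whose startup the log reports ("Mini mapreduce cluster started").
    util.startMiniMapReduceCluster();
    try {
      // ... test body (for example, the export-snapshot scenario) would run here ...
    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }
}
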
2024-12-11T06:10:34,167 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:34,322 WARN [Thread-372 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:34,655 INFO [Thread-372 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T06:10:34,665 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-11T06:10:34,666 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T06:10:34,696 INFO [Thread-372 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T06:10:34,696 INFO [Thread-372 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T06:10:34,696 INFO [Thread-372 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T06:10:34,701 INFO [Thread-372 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6109c21b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,AVAILABLE} 2024-12-11T06:10:34,701 INFO [Thread-372 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a8e0d74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T06:10:34,707 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T06:10:34,707 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T06:10:34,707 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T06:10:34,710 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:34,737 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4096812e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,AVAILABLE} 2024-12-11T06:10:34,738 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@293bf43a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T06:10:34,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741828_1004 (size=1189) 2024-12-11T06:10:34,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741831_1007 (size=1321) 2024-12-11T06:10:34,934 INFO [Thread-372 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-11T06:10:34,935 INFO [Thread-372 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-11T06:10:34,936 INFO [Thread-372 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-11T06:10:34,938 INFO [Thread-372 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-11T06:10:34,991 INFO [Thread-372 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T06:10:35,377 INFO [Thread-372 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T06:10:35,938 INFO [Thread-372 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T06:10:35,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d311af2{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/java.io.tmpdir/jetty-localhost-32909-hadoop-yarn-common-3_4_1_jar-_-any-5239213951865018407/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-11T06:10:35,998 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@28824225{HTTP/1.1, (http/1.1)}{localhost:32909} 2024-12-11T06:10:35,998 INFO [Thread-372 {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@418de80b{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/java.io.tmpdir/jetty-localhost-45449-hadoop-yarn-common-3_4_1_jar-_-any-13950464176116545927/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-11T06:10:35,998 INFO [Time-limited test {}] server.Server(415): Started @16550ms 2024-12-11T06:10:36,006 INFO [Thread-372 {}] server.AbstractConnector(333): Started ServerConnector@581cb918{HTTP/1.1, (http/1.1)}{localhost:45449} 2024-12-11T06:10:36,006 INFO [Thread-372 {}] server.Server(415): Started @16558ms 2024-12-11T06:10:36,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741841_1017 (size=5) 2024-12-11T06:10:36,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741841_1017 (size=5) 2024-12-11T06:10:36,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741841_1017 (size=5) 2024-12-11T06:10:36,867 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:10:37,077 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-11T06:10:37,080 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-11T06:10:37,429 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-11T06:10:37,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:37,474 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-11T06:10:37,475 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T06:10:37,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T06:10:37,521 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T06:10:37,521 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T06:10:37,525 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:37,528 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@310cee80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,AVAILABLE} 2024-12-11T06:10:37,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ea861f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T06:10:37,590 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-11T06:10:37,591 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-11T06:10:37,591 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-11T06:10:37,591 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-11T06:10:37,603 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T06:10:37,627 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T06:10:37,925 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T06:10:37,939 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ae6f868{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/java.io.tmpdir/jetty-localhost-39277-hadoop-yarn-common-3_4_1_jar-_-any-1551076211131834370/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T06:10:37,940 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7310eace{HTTP/1.1, (http/1.1)}{localhost:39277} 2024-12-11T06:10:37,941 INFO [Time-limited test {}] server.Server(415): Started @18492ms 2024-12-11T06:10:38,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:10:38,057 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-11T06:10:38,057 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter 
for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-11T06:10:38,057 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-11T06:10:38,072 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-11T06:10:38,073 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-11T06:10:38,076 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:10:38,076 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-11T06:10:38,076 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-11T06:10:38,076 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-11T06:10:38,077 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:10:38,077 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-11T06:10:38,077 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T06:10:38,077 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-11T06:10:38,077 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-11T06:10:38,080 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-11T06:10:38,233 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-11T06:10:38,236 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:38,285 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-11T06:10:38,286 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T06:10:38,301 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T06:10:38,301 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T06:10:38,301 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T06:10:38,303 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T06:10:38,303 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18f5cdc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,AVAILABLE} 2024-12-11T06:10:38,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69d482a5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-11T06:10:38,399 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-11T06:10:38,399 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-11T06:10:38,399 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-11T06:10:38,399 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-11T06:10:38,416 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T06:10:38,456 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T06:10:38,617 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-11T06:10:38,624 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@41960f37{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/java.io.tmpdir/jetty-localhost-34975-hadoop-yarn-common-3_4_1_jar-_-any-16111963186027957306/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T06:10:38,625 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@762aa3c9{HTTP/1.1, (http/1.1)}{localhost:34975} 2024-12-11T06:10:38,626 INFO [Time-limited test {}] server.Server(415): Started @19177ms 2024-12-11T06:10:38,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-11T06:10:38,660 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:10:38,702 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=711, OpenFileDescriptor=753, MaxFileDescriptor=1048576, SystemLoadAverage=409, ProcessCount=11, AvailableMemoryMB=7045 2024-12-11T06:10:38,704 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=711 is superior to 500 2024-12-11T06:10:38,715 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T06:10:38,722 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is b66397bb9319,39063,1733897427482 2024-12-11T06:10:38,722 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7b9e8b39 2024-12-11T06:10:38,723 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T06:10:38,726 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47394, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T06:10:38,728 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:10:38,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:38,734 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:10:38,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(787): 
Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-11T06:10:38,738 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:10:38,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T06:10:38,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741842_1018 (size=458) 2024-12-11T06:10:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741842_1018 (size=458) 2024-12-11T06:10:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741842_1018 (size=458) 2024-12-11T06:10:38,770 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1194fc3df4ce2c61f3cc6db1a5cd3843, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:10:38,773 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ec1b979a6a9bb8061812d28abf727fcd, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:10:38,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741844_1020 (size=83) 2024-12-11T06:10:38,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741844_1020 (size=83) 2024-12-11T06:10:38,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741844_1020 (size=83) 
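
The create request logged above spells out the full descriptor: a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', BLOOMFILTER => 'ROW') and, as the two RegionOpenAndInit workers below show, a single split point '1' producing regions '' to '1' and '1' to ''. A minimal client-side sketch that would request the same table through the ordinary TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API follows; it is illustrative only, and the test's own helper methods may differ.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateMobTableSketch {
  static void createTestTable(Admin admin) throws Exception {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)               // IS_MOB => 'true'
            .setMobThreshold(0L)               // MOB_THRESHOLD => '0': every value is stored as a MOB
            .setMaxVersions(1)                 // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
            .build())
        .build();
    // One split key yields the two regions ('' -> '1', '1' -> '') assigned by pid=8/9 below;
    // the synchronous createTable call blocks until the CreateTableProcedure (pid=7) finishes.
    admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
  }
}
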
2024-12-11T06:10:38,841 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:38,841 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing ec1b979a6a9bb8061812d28abf727fcd, disabling compactions & flushes 2024-12-11T06:10:38,841 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:10:38,841 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:10:38,842 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. after waiting 1 ms 2024-12-11T06:10:38,842 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:10:38,842 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 
2024-12-11T06:10:38,842 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for ec1b979a6a9bb8061812d28abf727fcd: Waiting for close lock at 1733897438841Disabling compacts and flushes for region at 1733897438841Disabling writes for close at 1733897438842 (+1 ms)Writing region close event to WAL at 1733897438842Closed at 1733897438842 2024-12-11T06:10:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T06:10:38,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741843_1019 (size=83) 2024-12-11T06:10:38,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741843_1019 (size=83) 2024-12-11T06:10:38,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741843_1019 (size=83) 2024-12-11T06:10:38,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:38,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 1194fc3df4ce2c61f3cc6db1a5cd3843, disabling compactions & flushes 2024-12-11T06:10:38,858 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:10:38,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:10:38,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. after waiting 0 ms 2024-12-11T06:10:38,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:10:38,858 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 
2024-12-11T06:10:38,858 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1194fc3df4ce2c61f3cc6db1a5cd3843: Waiting for close lock at 1733897438858Disabling compacts and flushes for region at 1733897438858Disabling writes for close at 1733897438858Writing region close event to WAL at 1733897438858Closed at 1733897438858 2024-12-11T06:10:38,862 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:10:38,862 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733897438862"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897438862"}]},"ts":"1733897438862"} 2024-12-11T06:10:38,863 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733897438862"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897438862"}]},"ts":"1733897438862"} 2024-12-11T06:10:38,915 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T06:10:38,939 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:10:38,940 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897438940"}]},"ts":"1733897438940"} 2024-12-11T06:10:38,948 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-11T06:10:38,948 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:10:38,951 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:10:38,951 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:10:38,951 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:10:38,951 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:10:38,951 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:10:38,951 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:10:38,951 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:10:38,951 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:10:38,951 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:10:38,951 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:10:38,952 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1194fc3df4ce2c61f3cc6db1a5cd3843, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ec1b979a6a9bb8061812d28abf727fcd, ASSIGN}] 2024-12-11T06:10:38,955 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1194fc3df4ce2c61f3cc6db1a5cd3843, ASSIGN 2024-12-11T06:10:38,955 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ec1b979a6a9bb8061812d28abf727fcd, ASSIGN 2024-12-11T06:10:38,957 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1194fc3df4ce2c61f3cc6db1a5cd3843, ASSIGN; state=OFFLINE, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:10:38,957 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ec1b979a6a9bb8061812d28abf727fcd, ASSIGN; state=OFFLINE, location=b66397bb9319,44539,1733897428597; forceNewPlan=false, retain=false 2024-12-11T06:10:39,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T06:10:39,108 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T06:10:39,108 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=ec1b979a6a9bb8061812d28abf727fcd, regionState=OPENING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:10:39,109 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=1194fc3df4ce2c61f3cc6db1a5cd3843, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:10:39,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ec1b979a6a9bb8061812d28abf727fcd, ASSIGN because future has completed 2024-12-11T06:10:39,114 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec1b979a6a9bb8061812d28abf727fcd, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:10:39,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1194fc3df4ce2c61f3cc6db1a5cd3843, ASSIGN because future has completed 2024-12-11T06:10:39,118 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:10:39,271 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T06:10:39,292 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:10:39,292 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 1194fc3df4ce2c61f3cc6db1a5cd3843, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:10:39,293 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. service=AccessControlService 2024-12-11T06:10:39,293 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:10:39,294 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:39,294 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:39,294 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:39,294 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:39,301 INFO [StoreOpener-1194fc3df4ce2c61f3cc6db1a5cd3843-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:39,302 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60467, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T06:10:39,314 INFO [StoreOpener-1194fc3df4ce2c61f3cc6db1a5cd3843-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1194fc3df4ce2c61f3cc6db1a5cd3843 columnFamilyName cf 2024-12-11T06:10:39,325 DEBUG [StoreOpener-1194fc3df4ce2c61f3cc6db1a5cd3843-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:39,326 INFO [StoreOpener-1194fc3df4ce2c61f3cc6db1a5cd3843-1 {}] regionserver.HStore(327): Store=1194fc3df4ce2c61f3cc6db1a5cd3843/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:10:39,328 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:39,329 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843 
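
The CompactionConfiguration line above echoes the store's effective compaction settings (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0). These values are normally driven by the standard hbase-site.xml keys shown below; treat the mapping as an assumption about this run's defaults rather than a dump of its configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      // Returns a Configuration carrying the compaction knobs that feed the
      // CompactionConfiguration values printed in the log above (assumed mapping).
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum/maximum number of store files selected for a single minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Files below this size are always compaction candidates; matches the 128 MB minCompactSize.
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        // Selection ratios printed as "ratio 1.200000; off-peak ratio 5.000000".
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        return conf;
      }
    }
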
2024-12-11T06:10:39,330 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:39,331 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:39,331 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:39,334 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:39,340 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:10:39,341 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => ec1b979a6a9bb8061812d28abf727fcd, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:10:39,342 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. service=AccessControlService 2024-12-11T06:10:39,343 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:10:39,343 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:39,343 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:39,343 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:39,343 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:39,349 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:10:39,350 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 1194fc3df4ce2c61f3cc6db1a5cd3843; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71452080, jitterRate=0.06471896171569824}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:10:39,350 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:39,352 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 1194fc3df4ce2c61f3cc6db1a5cd3843: Running coprocessor pre-open hook at 1733897439294Writing region info on filesystem at 1733897439294Initializing all the Stores at 1733897439297 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897439297Cleaning up temporary data from old regions at 1733897439331 (+34 ms)Running coprocessor post-open hooks at 1733897439350 (+19 ms)Region opened successfully at 1733897439352 (+2 ms) 2024-12-11T06:10:39,354 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843., pid=11, masterSystemTime=1733897439274 2024-12-11T06:10:39,360 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for 
testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:10:39,360 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:10:39,361 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=1194fc3df4ce2c61f3cc6db1a5cd3843, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:10:39,361 INFO [StoreOpener-ec1b979a6a9bb8061812d28abf727fcd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:39,366 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:10:39,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T06:10:39,370 INFO [StoreOpener-ec1b979a6a9bb8061812d28abf727fcd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ec1b979a6a9bb8061812d28abf727fcd columnFamilyName cf 2024-12-11T06:10:39,372 DEBUG [StoreOpener-ec1b979a6a9bb8061812d28abf727fcd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:39,373 INFO [StoreOpener-ec1b979a6a9bb8061812d28abf727fcd-1 {}] regionserver.HStore(327): Store=ec1b979a6a9bb8061812d28abf727fcd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:10:39,373 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:39,375 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:39,376 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:39,376 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:39,377 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:39,379 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:39,385 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:10:39,387 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened ec1b979a6a9bb8061812d28abf727fcd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63283069, jitterRate=-0.057008787989616394}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:10:39,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-12-11T06:10:39,387 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:39,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843, server=b66397bb9319,35939,1733897428441 in 264 msec 2024-12-11T06:10:39,387 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for ec1b979a6a9bb8061812d28abf727fcd: Running coprocessor pre-open hook at 1733897439344Writing region info on filesystem at 1733897439344Initializing all the Stores at 1733897439353 (+9 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897439353Cleaning up temporary data from old regions at 1733897439377 (+24 ms)Running coprocessor post-open hooks at 1733897439387 (+10 ms)Region opened successfully at 1733897439387 2024-12-11T06:10:39,390 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd., pid=10, masterSystemTime=1733897439270 2024-12-11T06:10:39,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1194fc3df4ce2c61f3cc6db1a5cd3843, ASSIGN in 435 msec 2024-12-11T06:10:39,393 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:10:39,394 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:10:39,395 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=ec1b979a6a9bb8061812d28abf727fcd, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:10:39,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure ec1b979a6a9bb8061812d28abf727fcd, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:10:39,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-11T06:10:39,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure ec1b979a6a9bb8061812d28abf727fcd, server=b66397bb9319,44539,1733897428597 in 287 msec 2024-12-11T06:10:39,411 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-11T06:10:39,411 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ec1b979a6a9bb8061812d28abf727fcd, ASSIGN in 454 msec 2024-12-11T06:10:39,413 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:10:39,413 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897439413"}]},"ts":"1733897439413"} 2024-12-11T06:10:39,417 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-11T06:10:39,418 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:10:39,423 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-11T06:10:39,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:10:39,440 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:10:39,440 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:10:39,441 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:39,442 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37223, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-11T06:10:39,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:10:39,450 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T06:10:39,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T06:10:39,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T06:10:39,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T06:10:39,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:39,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:39,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:39,471 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-11T06:10:39,472 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:10:39,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, 
quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:39,477 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:39,479 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:39,481 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 749 msec 2024-12-11T06:10:39,484 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:39,491 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:39,491 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:39,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T06:10:39,878 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T06:10:39,882 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T06:10:39,890 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:39,891 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 
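
The PermissionStorage write (rowKey testtb-testExportFileSystemStateWithSplitRegion, jenkins: RWXCA) and the ZKPermissionWatcher cache updates above record the owner grant created for the new table. A hedged sketch of reading that grant back through the public AccessControlClient API follows; the class and method names (ShowTableAcl, show) are illustrative only.

    import java.util.List;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public final class ShowTableAcl {
      static void show(Connection conn) throws Throwable {
        // Reads the hbase:acl entries for the table; for the run above this
        // should report the owner grant "jenkins: RWXCA".
        List<UserPermission> perms = AccessControlClient.getUserPermissions(
            conn, "testtb-testExportFileSystemStateWithSplitRegion");
        perms.forEach(p -> System.out.println(p));
      }
    }
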
2024-12-11T06:10:39,892 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:10:39,898 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T06:10:39,914 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T06:10:39,926 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:39,928 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54402, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:10:39,930 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T06:10:39,942 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T06:10:39,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897439942 (current time:1733897439942). 
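
The snapshot request logged above ({ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }) is the shape produced by an Admin.snapshot call with SnapshotType.FLUSH. A minimal sketch of issuing it from a client is below; it assumes the standard synchronous Admin API and is not the test's own helper code.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public final class TakeFlushSnapshot {
      static void take(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          // Synchronous call: returns once the master's SnapshotProcedure
          // (pid=12 in the log below) completes.
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
              SnapshotType.FLUSH);
        }
      }
    }
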
2024-12-11T06:10:39,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:10:39,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-11T06:10:39,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:10:39,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cd0dec6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:39,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:10:39,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:10:39,953 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:10:39,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:10:39,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:10:39,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58dd5b68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:39,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:10:39,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:10:39,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:39,956 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47414, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:10:39,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7478d237, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:39,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:10:39,959 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:10:39,960 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:39,961 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39836, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:10:39,963 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 2024-12-11T06:10:39,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:10:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:39,972 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:10:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bfbda44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:10:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:10:39,975 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:10:39,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:10:39,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:10:39,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb8c4f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:39,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:10:39,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:10:39,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:39,978 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47422, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:10:39,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@aa403eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:10:39,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:10:39,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:39,983 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39840, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T06:10:39,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:10:39,989 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 2024-12-11T06:10:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:10:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:39,989 INFO [Registry-endpoints-refresh-end-points 
{}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:10:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T06:10:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T06:10:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T06:10:40,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-11T06:10:40,003 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:10:40,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T06:10:40,010 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:10:40,024 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:10:40,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741845_1021 (size=215) 2024-12-11T06:10:40,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741845_1021 (size=215) 2024-12-11T06:10:40,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741845_1021 (size=215) 2024-12-11T06:10:40,040 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:10:40,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec1b979a6a9bb8061812d28abf727fcd}] 2024-12-11T06:10:40,050 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:40,050 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T06:10:40,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-11T06:10:40,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-11T06:10:40,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:10:40,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:10:40,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for ec1b979a6a9bb8061812d28abf727fcd: 2024-12-11T06:10:40,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 1194fc3df4ce2c61f3cc6db1a5cd3843: 2024-12-11T06:10:40,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T06:10:40,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T06:10:40,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:40,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:40,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:10:40,222 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:10:40,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:10:40,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:10:40,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741846_1022 (size=86) 2024-12-11T06:10:40,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741846_1022 (size=86) 2024-12-11T06:10:40,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741846_1022 (size=86) 2024-12-11T06:10:40,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 
2024-12-11T06:10:40,248 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-11T06:10:40,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-11T06:10:40,251 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:40,251 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:40,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741847_1023 (size=86) 2024-12-11T06:10:40,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741847_1023 (size=86) 2024-12-11T06:10:40,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741847_1023 (size=86) 2024-12-11T06:10:40,261 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843 in 213 msec 2024-12-11T06:10:40,262 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 
2024-12-11T06:10:40,262 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-11T06:10:40,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-11T06:10:40,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:40,264 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:40,269 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-12-11T06:10:40,269 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ec1b979a6a9bb8061812d28abf727fcd in 221 msec 2024-12-11T06:10:40,269 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:10:40,272 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:10:40,274 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:10:40,275 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:10:40,275 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:40,276 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T06:10:40,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741848_1024 (size=78) 2024-12-11T06:10:40,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741848_1024 (size=78) 2024-12-11T06:10:40,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741848_1024 (size=78) 2024-12-11T06:10:40,296 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:10:40,296 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:40,300 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:40,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T06:10:40,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741849_1025 (size=713) 2024-12-11T06:10:40,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741849_1025 (size=713) 2024-12-11T06:10:40,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741849_1025 (size=713) 2024-12-11T06:10:40,332 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:10:40,346 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:10:40,348 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:40,351 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:10:40,351 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-11T06:10:40,355 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 354 msec 2024-12-11T06:10:40,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-11T06:10:40,627 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T06:10:40,642 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:10:40,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44539 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:10:40,650 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T06:10:40,654 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:40,654 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 
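[Editor's note] The two "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above come from the test loading rows with write-ahead logging switched off on the mutation. A minimal sketch of a put that takes that code path, assuming an already-open Connection named conn; this is illustrative and not taken from the test source:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  // 'conn' is assumed to be an open Connection to the cluster under test.
  static void putWithoutWal(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
    try (Table table = conn.getTable(name)) {
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL); // writes with this durability trigger the "WAL disabled" warning in HRegion
      table.put(put);
    }
  }
}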
2024-12-11T06:10:40,654 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:10:40,656 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T06:10:40,663 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T06:10:40,672 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T06:10:40,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T06:10:40,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897440676 (current time:1733897440676). 2024-12-11T06:10:40,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:10:40,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-11T06:10:40,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:10:40,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@506f7476, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:40,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:10:40,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:10:40,679 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:10:40,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:10:40,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:10:40,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@613649f3, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:40,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:10:40,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:10:40,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:40,682 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47434, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:10:40,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26161aeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:40,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:10:40,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:10:40,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:40,686 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39842, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:10:40,688 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
2024-12-11T06:10:40,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:10:40,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:40,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:40,688 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:10:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@389fd677, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:10:40,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:10:40,690 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:10:40,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:10:40,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:10:40,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31ddf7fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:40,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:10:40,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:10:40,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:40,692 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47438, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:10:40,693 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eb30cdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:40,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:10:40,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:10:40,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:40,697 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39848, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:10:40,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:10:40,701 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
2024-12-11T06:10:40,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:10:40,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:40,702 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:10:40,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T06:10:40,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
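[Editor's note] The entries above show the master receiving the client's request for snapshot snaptb0-testExportFileSystemStateWithSplitRegion, filling in defaults (creation time, TTL, version, owner) and reading the table ACL before registering the SnapshotProcedure. A minimal client-side sketch of issuing that kind of FLUSH snapshot, assuming the standard HBase Admin API and a reachable cluster configuration; it is not part of the captured run:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master reports the snapshot procedure as finished,
      // which corresponds to the repeated "Checking to see if procedure is done" polling in the log.
      admin.snapshot("snaptb0-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"));
    }
  }
}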
2024-12-11T06:10:40,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T06:10:40,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-11T06:10:40,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T06:10:40,707 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:10:40,709 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:10:40,714 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:10:40,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741850_1026 (size=210) 2024-12-11T06:10:40,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741850_1026 (size=210) 2024-12-11T06:10:40,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741850_1026 (size=210) 2024-12-11T06:10:40,732 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:10:40,732 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec1b979a6a9bb8061812d28abf727fcd}] 2024-12-11T06:10:40,734 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:40,734 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:40,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T06:10:40,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-11T06:10:40,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:10:40,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-11T06:10:40,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:10:40,897 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 1194fc3df4ce2c61f3cc6db1a5cd3843 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-11T06:10:40,897 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing ec1b979a6a9bb8061812d28abf727fcd 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-11T06:10:41,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T06:10:41,043 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211c6868404bff9486b8063ddc1b4587199_ec1b979a6a9bb8061812d28abf727fcd is 71, key is 127e765e868ba950c14e432ab6ef5861/cf:q/1733897440646/Put/seqid=0 2024-12-11T06:10:41,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121173d1525b955648378c528dc9ff4a2da1_1194fc3df4ce2c61f3cc6db1a5cd3843 is 71, key is 0660ec7b5b0a73b2446ba3aeba51ee0e/cf:q/1733897440642/Put/seqid=0 2024-12-11T06:10:41,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741852_1028 (size=5102) 2024-12-11T06:10:41,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:41,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741852_1028 (size=5102) 
2024-12-11T06:10:41,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741852_1028 (size=5102) 2024-12-11T06:10:41,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741851_1027 (size=8172) 2024-12-11T06:10:41,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741851_1027 (size=8172) 2024-12-11T06:10:41,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741851_1027 (size=8172) 2024-12-11T06:10:41,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:41,187 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211c6868404bff9486b8063ddc1b4587199_ec1b979a6a9bb8061812d28abf727fcd to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241211c6868404bff9486b8063ddc1b4587199_ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:41,187 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121173d1525b955648378c528dc9ff4a2da1_1194fc3df4ce2c61f3cc6db1a5cd3843 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024121173d1525b955648378c528dc9ff4a2da1_1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:41,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/.tmp/cf/a5b513fded4e401b9e073d63adbac34f, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=1194fc3df4ce2c61f3cc6db1a5cd3843] 2024-12-11T06:10:41,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/.tmp/cf/03e49046fb6e444aa3afe1c538aaf1c7, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=ec1b979a6a9bb8061812d28abf727fcd] 2024-12-11T06:10:41,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell 
in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/.tmp/cf/a5b513fded4e401b9e073d63adbac34f is 224, key is 01e35ad71cd5eb0b29dcc65f2bdae97a1/cf:q/1733897440642/Put/seqid=0 2024-12-11T06:10:41,231 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/.tmp/cf/03e49046fb6e444aa3afe1c538aaf1c7 is 224, key is 1077c38cb7eb5e27fa04f43056351f300/cf:q/1733897440646/Put/seqid=0 2024-12-11T06:10:41,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741854_1030 (size=15719) 2024-12-11T06:10:41,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741854_1030 (size=15719) 2024-12-11T06:10:41,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741854_1030 (size=15719) 2024-12-11T06:10:41,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741853_1029 (size=5978) 2024-12-11T06:10:41,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741853_1029 (size=5978) 2024-12-11T06:10:41,247 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/.tmp/cf/03e49046fb6e444aa3afe1c538aaf1c7 2024-12-11T06:10:41,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741853_1029 (size=5978) 2024-12-11T06:10:41,252 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/.tmp/cf/a5b513fded4e401b9e073d63adbac34f 2024-12-11T06:10:41,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/.tmp/cf/03e49046fb6e444aa3afe1c538aaf1c7 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/cf/03e49046fb6e444aa3afe1c538aaf1c7 2024-12-11T06:10:41,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 
2024-12-11T06:10:41,340 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/.tmp/cf/a5b513fded4e401b9e073d63adbac34f as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/cf/a5b513fded4e401b9e073d63adbac34f 2024-12-11T06:10:41,345 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/cf/03e49046fb6e444aa3afe1c538aaf1c7, entries=47, sequenceid=6, filesize=15.4 K 2024-12-11T06:10:41,351 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/cf/a5b513fded4e401b9e073d63adbac34f, entries=3, sequenceid=6, filesize=5.8 K 2024-12-11T06:10:41,360 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for ec1b979a6a9bb8061812d28abf727fcd in 460ms, sequenceid=6, compaction requested=false 2024-12-11T06:10:41,360 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 1194fc3df4ce2c61f3cc6db1a5cd3843 in 460ms, sequenceid=6, compaction requested=false 2024-12-11T06:10:41,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-11T06:10:41,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-11T06:10:41,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for ec1b979a6a9bb8061812d28abf727fcd: 2024-12-11T06:10:41,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 1194fc3df4ce2c61f3cc6db1a5cd3843: 2024-12-11T06:10:41,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 
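[Editor's note] The flush entries above write both a MOB file under mobdir/ (via HMobStore and DefaultMobStoreFlusher) and a regular store file per region, which indicates the 'cf' family of the test table is MOB-enabled. A minimal sketch of how a column family descriptor gets MOB enabled, assuming the standard builder API; the threshold value is hypothetical, since the test's actual settings are not visible in this excerpt:

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  // Builds a 'cf' family whose values above the threshold are stored as MOB files,
  // which is what routes flushes through DefaultMobStoreFlusher/HMobStore as seen in the log.
  static ColumnFamilyDescriptor mobEnabledFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)
        .setMobThreshold(0L) // hypothetical threshold, chosen only for illustration
        .build();
  }
}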
2024-12-11T06:10:41,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T06:10:41,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:41,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:41,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:10:41,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:10:41,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/cf/03e49046fb6e444aa3afe1c538aaf1c7] hfiles 2024-12-11T06:10:41,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/cf/a5b513fded4e401b9e073d63adbac34f] hfiles 2024-12-11T06:10:41,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/cf/03e49046fb6e444aa3afe1c538aaf1c7 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:41,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/cf/a5b513fded4e401b9e073d63adbac34f for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:41,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741855_1031 (size=125) 2024-12-11T06:10:41,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741855_1031 (size=125) 2024-12-11T06:10:41,419 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741855_1031 (size=125) 2024-12-11T06:10:41,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:10:41,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-11T06:10:41,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-11T06:10:41,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:41,421 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:41,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ec1b979a6a9bb8061812d28abf727fcd in 690 msec 2024-12-11T06:10:41,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741856_1032 (size=125) 2024-12-11T06:10:41,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741856_1032 (size=125) 2024-12-11T06:10:41,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741856_1032 (size=125) 2024-12-11T06:10:41,437 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 
2024-12-11T06:10:41,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-11T06:10:41,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-11T06:10:41,439 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:41,439 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:41,445 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-12-11T06:10:41,446 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843 in 709 msec 2024-12-11T06:10:41,446 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:10:41,447 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:10:41,449 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:10:41,449 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:10:41,450 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:41,452 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241211c6868404bff9486b8063ddc1b4587199_ec1b979a6a9bb8061812d28abf727fcd, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024121173d1525b955648378c528dc9ff4a2da1_1194fc3df4ce2c61f3cc6db1a5cd3843] hfiles 2024-12-11T06:10:41,453 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241211c6868404bff9486b8063ddc1b4587199_ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:10:41,453 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024121173d1525b955648378c528dc9ff4a2da1_1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:10:41,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741857_1033 (size=309) 2024-12-11T06:10:41,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741857_1033 (size=309) 2024-12-11T06:10:41,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741857_1033 (size=309) 2024-12-11T06:10:41,474 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:10:41,474 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:41,476 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:41,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741858_1034 (size=1023) 2024-12-11T06:10:41,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37875 is added to blk_1073741858_1034 (size=1023) 2024-12-11T06:10:41,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741858_1034 (size=1023) 2024-12-11T06:10:41,503 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:10:41,521 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:10:41,522 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:41,524 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:10:41,524 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-11T06:10:41,527 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 821 msec 2024-12-11T06:10:41,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-11T06:10:41,847 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T06:10:41,870 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T06:10:41,871 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T06:10:41,872 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T06:10:41,873 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39856, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T06:10:41,873 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40608, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T06:10:41,874 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-11T06:10:41,874 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42579 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-11T06:10:41,875 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54406, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T06:10:41,875 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44539 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-11T06:10:41,878 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:10:41,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:41,881 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:10:41,881 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:41,881 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-11T06:10:41,882 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:10:41,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T06:10:41,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741859_1035 (size=390) 2024-12-11T06:10:41,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741859_1035 (size=390) 2024-12-11T06:10:41,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741859_1035 (size=390) 2024-12-11T06:10:41,898 INFO 
[RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1f7b173781412d5e682785e48ae8f597, NAME => 'testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:10:41,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741860_1036 (size=75) 2024-12-11T06:10:41,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741860_1036 (size=75) 2024-12-11T06:10:41,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741860_1036 (size=75) 2024-12-11T06:10:41,912 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:41,912 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 1f7b173781412d5e682785e48ae8f597, disabling compactions & flushes 2024-12-11T06:10:41,912 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 2024-12-11T06:10:41,912 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 2024-12-11T06:10:41,912 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. after waiting 0 ms 2024-12-11T06:10:41,912 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 2024-12-11T06:10:41,912 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 
2024-12-11T06:10:41,913 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1f7b173781412d5e682785e48ae8f597: Waiting for close lock at 1733897441912Disabling compacts and flushes for region at 1733897441912Disabling writes for close at 1733897441912Writing region close event to WAL at 1733897441912Closed at 1733897441912 2024-12-11T06:10:41,915 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:10:41,915 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733897441915"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897441915"}]},"ts":"1733897441915"} 2024-12-11T06:10:41,919 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-11T06:10:41,921 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:10:41,921 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897441921"}]},"ts":"1733897441921"} 2024-12-11T06:10:41,925 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-11T06:10:41,925 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:10:41,926 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:10:41,926 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:10:41,927 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:10:41,927 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:10:41,927 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:10:41,927 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:10:41,927 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:10:41,927 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:10:41,927 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:10:41,927 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:10:41,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1f7b173781412d5e682785e48ae8f597, ASSIGN}] 2024-12-11T06:10:41,928 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1f7b173781412d5e682785e48ae8f597, ASSIGN 2024-12-11T06:10:41,930 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1f7b173781412d5e682785e48ae8f597, ASSIGN; state=OFFLINE, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:10:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T06:10:42,080 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T06:10:42,081 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=1f7b173781412d5e682785e48ae8f597, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:10:42,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1f7b173781412d5e682785e48ae8f597, ASSIGN because future has completed 2024-12-11T06:10:42,085 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1f7b173781412d5e682785e48ae8f597, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:10:42,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T06:10:42,239 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T06:10:42,241 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48289, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T06:10:42,248 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 2024-12-11T06:10:42,248 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 1f7b173781412d5e682785e48ae8f597, NAME => 'testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597.', STARTKEY => '', ENDKEY => ''} 2024-12-11T06:10:42,249 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. service=AccessControlService 2024-12-11T06:10:42,249 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:10:42,249 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:42,249 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:42,250 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:42,250 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:42,252 INFO [StoreOpener-1f7b173781412d5e682785e48ae8f597-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:42,261 INFO [StoreOpener-1f7b173781412d5e682785e48ae8f597-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1f7b173781412d5e682785e48ae8f597 columnFamilyName cf 2024-12-11T06:10:42,261 DEBUG [StoreOpener-1f7b173781412d5e682785e48ae8f597-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:42,262 INFO [StoreOpener-1f7b173781412d5e682785e48ae8f597-1 {}] regionserver.HStore(327): Store=1f7b173781412d5e682785e48ae8f597/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:10:42,263 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:42,264 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:42,265 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:42,266 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:42,266 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:42,269 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:42,272 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:10:42,273 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 1f7b173781412d5e682785e48ae8f597; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68732365, jitterRate=0.024192050099372864}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:10:42,273 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:42,275 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 1f7b173781412d5e682785e48ae8f597: Running coprocessor pre-open hook at 1733897442250Writing region info on filesystem at 1733897442250Initializing all the Stores at 1733897442252 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897442252Cleaning up temporary data from old regions at 1733897442266 (+14 ms)Running coprocessor post-open hooks at 1733897442273 (+7 ms)Region opened successfully at 1733897442275 (+2 ms) 2024-12-11T06:10:42,276 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597., pid=20, masterSystemTime=1733897442239 2024-12-11T06:10:42,280 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 2024-12-11T06:10:42,280 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 
2024-12-11T06:10:42,281 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=1f7b173781412d5e682785e48ae8f597, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:10:42,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1f7b173781412d5e682785e48ae8f597, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:10:42,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-11T06:10:42,293 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 1f7b173781412d5e682785e48ae8f597, server=b66397bb9319,42579,1733897428663 in 204 msec 2024-12-11T06:10:42,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-11T06:10:42,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1f7b173781412d5e682785e48ae8f597, ASSIGN in 365 msec 2024-12-11T06:10:42,297 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:10:42,298 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897442297"}]},"ts":"1733897442297"} 2024-12-11T06:10:42,300 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-11T06:10:42,301 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:10:42,301 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-11T06:10:42,305 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T06:10:42,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:10:42,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:10:42,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:10:42,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, 
quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:10:42,429 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:42,429 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:42,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:42,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:42,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:42,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:42,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:42,431 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:10:42,431 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 550 msec 2024-12-11T06:10:42,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-11T06:10:42,508 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T06:10:42,509 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:42,512 INFO [Time-limited test {}] fs.HFileSystem(339): 
Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T06:10:43,082 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-11T06:10:44,798 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:10:44,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741861_1037 (size=134217728) 2024-12-11T06:10:44,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741861_1037 (size=134217728) 2024-12-11T06:10:44,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741861_1037 (size=134217728) 2024-12-11T06:10:47,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741862_1038 (size=134217728) 2024-12-11T06:10:47,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741862_1038 (size=134217728) 2024-12-11T06:10:47,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741862_1038 (size=134217728) 2024-12-11T06:10:48,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:48,055 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-11T06:10:48,057 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:48,058 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-11T06:10:48,162 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733897442518/Put/seqid=0 2024-12-11T06:10:48,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741863_1039 (size=51979256) 2024-12-11T06:10:48,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741863_1039 (size=51979256) 2024-12-11T06:10:48,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741863_1039 (size=51979256) 2024-12-11T06:10:48,184 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d89354b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:48,184 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:10:48,185 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:10:48,196 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:10:48,198 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:10:48,198 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:10:48,198 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3028913f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:48,198 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:10:48,199 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:10:48,212 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:48,214 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46788, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:10:48,225 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ed7489, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:48,226 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:10:48,228 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:10:48,229 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:48,243 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38964, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:10:48,272 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:42173/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 
2024-12-11T06:10:48,272 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T06:10:48,274 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.AsyncConnectionImpl(321): The fetched master address is b66397bb9319,39063,1733897427482 2024-12-11T06:10:48,274 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@66f78b65 2024-12-11T06:10:48,275 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T06:10:48,290 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46794, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T06:10:48,301 WARN [IPC Server handler 1 on default port 42173 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-11T06:10:48,310 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597., hostname=b66397bb9319,42579,1733897428663, seqNum=2] 2024-12-11T06:10:48,315 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:48,336 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46908, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:10:48,344 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T06:10:48,418 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:42173/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-11T06:10:48,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:10:48,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:10:48,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:48,474 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54161, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-11T06:10:48,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at 
org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-11T06:10:48,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:54161 deadline: 1733897508475, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-11T06:10:48,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T06:10:48,488 WARN [IPC Server handler 1 on default port 42173 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-11T06:10:48,549 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:42173/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/output/cf/test_file for inclusion in 1f7b173781412d5e682785e48ae8f597/cf 2024-12-11T06:10:48,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-11T06:10:48,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-11T06:10:48,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:42173/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-11T06:10:48,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HRegion(2603): Flush status journal for 1f7b173781412d5e682785e48ae8f597: 2024-12-11T06:10:48,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:42173/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/output/cf/test_file to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/staging/jenkins__testExportFileSystemStateWithSplitRegion__uqh0cthpfq1o8uei3iabijt5ph87sm7fcrugbf97r76mn65i7fe23a5shcdb34ud/cf/test_file 2024-12-11T06:10:48,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/staging/jenkins__testExportFileSystemStateWithSplitRegion__uqh0cthpfq1o8uei3iabijt5ph87sm7fcrugbf97r76mn65i7fe23a5shcdb34ud/cf/test_file as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_ 2024-12-11T06:10:48,593 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/staging/jenkins__testExportFileSystemStateWithSplitRegion__uqh0cthpfq1o8uei3iabijt5ph87sm7fcrugbf97r76mn65i7fe23a5shcdb34ud/cf/test_file into 1f7b173781412d5e682785e48ae8f597/cf as 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_ - updating store file list. 2024-12-11T06:10:48,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-11T06:10:48,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_ into 1f7b173781412d5e682785e48ae8f597/cf 2024-12-11T06:10:48,645 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/staging/jenkins__testExportFileSystemStateWithSplitRegion__uqh0cthpfq1o8uei3iabijt5ph87sm7fcrugbf97r76mn65i7fe23a5shcdb34ud/cf/test_file into 1f7b173781412d5e682785e48ae8f597/cf (new location: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_) 2024-12-11T06:10:48,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/staging/jenkins__testExportFileSystemStateWithSplitRegion__uqh0cthpfq1o8uei3iabijt5ph87sm7fcrugbf97r76mn65i7fe23a5shcdb34ud/cf/test_file 2024-12-11T06:10:48,659 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-11T06:10:48,660 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T06:10:48,660 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:48,664 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597., hostname=b66397bb9319,42579,1733897428663, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597., hostname=b66397bb9319,42579,1733897428663, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to 
address=b66397bb9319:42579 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-11T06:10:48,665 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597., hostname=b66397bb9319,42579,1733897428663, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-11T06:10:48,666 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597., hostname=b66397bb9319,42579,1733897428663, seqNum=2 from cache 2024-12-11T06:10:48,668 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:48,669 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T06:10:48,669 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-12-11T06:10:48,676 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:10:48,678 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597., hostname=b66397bb9319,42579,1733897428663, seqNum=2] 2024-12-11T06:10:48,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 
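The split request that follows (pid=21) is triggered by an ordinary Admin call. A rough client-side sketch, assuming a split point of '5' to match the daughter boundaries seen later in this log; the polling loop is illustrative, not the test's exact code:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitRegionSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to split the region at row '5'; the master executes it
      // as a SplitTableRegionProcedure (the pid=21 entries below).
      admin.split(table, Bytes.toBytes("5"));
      // Wait until both daughter regions are online.
      while (admin.getRegions(table).size() < 2) {
        Thread.sleep(100);
      }
    }
  }
}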
2024-12-11T06:10:48,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=b66397bb9319,42579,1733897428663 2024-12-11T06:10:48,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=1f7b173781412d5e682785e48ae8f597, daughterA=74fccf93b2a8460b5626d9affabd96b1, daughterB=1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:48,713 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=1f7b173781412d5e682785e48ae8f597, daughterA=74fccf93b2a8460b5626d9affabd96b1, daughterB=1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:48,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1f7b173781412d5e682785e48ae8f597, UNASSIGN}] 2024-12-11T06:10:48,724 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1f7b173781412d5e682785e48ae8f597, UNASSIGN 2024-12-11T06:10:48,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T06:10:48,727 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=1f7b173781412d5e682785e48ae8f597, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:10:48,730 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1f7b173781412d5e682785e48ae8f597, UNASSIGN because future has completed 2024-12-11T06:10:48,731 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-11T06:10:48,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1f7b173781412d5e682785e48ae8f597, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:10:48,778 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=b66397bb9319:35939 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 34 more 2024-12-11T06:10:48,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T06:10:48,900 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:48,900 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-11T06:10:48,901 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 1f7b173781412d5e682785e48ae8f597, disabling compactions & flushes 2024-12-11T06:10:48,901 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 2024-12-11T06:10:48,902 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 2024-12-11T06:10:48,902 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. after waiting 0 ms 2024-12-11T06:10:48,902 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 
2024-12-11T06:10:48,937 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-11T06:10:48,942 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:10:48,942 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597. 2024-12-11T06:10:48,942 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 1f7b173781412d5e682785e48ae8f597: Waiting for close lock at 1733897448901Running coprocessor pre-close hooks at 1733897448901Disabling compacts and flushes for region at 1733897448901Disabling writes for close at 1733897448902 (+1 ms)Writing region close event to WAL at 1733897448929 (+27 ms)Running coprocessor post-close hooks at 1733897448938 (+9 ms)Closed at 1733897448942 (+4 ms) 2024-12-11T06:10:48,947 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:48,948 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=1f7b173781412d5e682785e48ae8f597, regionState=CLOSED 2024-12-11T06:10:48,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1f7b173781412d5e682785e48ae8f597, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:10:48,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-11T06:10:48,959 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 1f7b173781412d5e682785e48ae8f597, server=b66397bb9319,42579,1733897428663 in 222 msec 2024-12-11T06:10:48,965 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-11T06:10:48,965 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1f7b173781412d5e682785e48ae8f597, UNASSIGN in 236 msec 2024-12-11T06:10:48,979 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:48,987 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=1f7b173781412d5e682785e48ae8f597, threads=1 2024-12-11T06:10:49,008 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_ for region: 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:49,021 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-11T06:10:49,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T06:10:49,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741864_1040 (size=21) 2024-12-11T06:10:49,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741864_1040 (size=21) 2024-12-11T06:10:49,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741864_1040 (size=21) 2024-12-11T06:10:49,075 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-11T06:10:49,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741865_1041 (size=21) 2024-12-11T06:10:49,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741865_1041 (size=21) 2024-12-11T06:10:49,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741865_1041 (size=21) 2024-12-11T06:10:49,097 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_ for region: 1f7b173781412d5e682785e48ae8f597 2024-12-11T06:10:49,100 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 1f7b173781412d5e682785e48ae8f597 Daughter A: [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597] storefiles, Daughter B: [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597] storefiles. 
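After the storefile split, each daughter's cf directory holds only a reference file named <parent-hfile>.<parent-encoded-region> (the Daughter A/B storefiles listed above) rather than a copy of the data. A small sketch for inspecting those files over plain HDFS, reusing the NameNode address and directory layout from this log; the choice of daughter A is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListDaughterStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:42173");  // NameNode from this log
    // Daughter A's column-family directory, as created by the split above.
    Path daughterCf = new Path("/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/"
        + "data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/cf");
    try (FileSystem fs = FileSystem.get(conf)) {
      for (FileStatus st : fs.listStatus(daughterCf)) {
        // Expect a reference such as 8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597
        System.out.println(st.getPath().getName() + " (" + st.getLen() + " bytes)");
      }
    }
  }
}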
2024-12-11T06:10:49,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741866_1042 (size=76) 2024-12-11T06:10:49,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741866_1042 (size=76) 2024-12-11T06:10:49,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741866_1042 (size=76) 2024-12-11T06:10:49,162 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:49,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741867_1043 (size=76) 2024-12-11T06:10:49,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741867_1043 (size=76) 2024-12-11T06:10:49,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741867_1043 (size=76) 2024-12-11T06:10:49,264 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:49,279 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-11T06:10:49,282 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-11T06:10:49,285 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733897449284"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733897449284"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733897449284"}]},"ts":"1733897449284"} 2024-12-11T06:10:49,285 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733897449284"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897449284"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733897449284"}]},"ts":"1733897449284"} 2024-12-11T06:10:49,285 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733897449284"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897449284"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733897449284"}]},"ts":"1733897449284"} 2024-12-11T06:10:49,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=74fccf93b2a8460b5626d9affabd96b1, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1e17f5e1db604aa0171849cd45511105, ASSIGN}] 2024-12-11T06:10:49,327 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=74fccf93b2a8460b5626d9affabd96b1, ASSIGN 2024-12-11T06:10:49,328 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1e17f5e1db604aa0171849cd45511105, ASSIGN 2024-12-11T06:10:49,329 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=74fccf93b2a8460b5626d9affabd96b1, ASSIGN; state=SPLITTING_NEW, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:10:49,329 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1e17f5e1db604aa0171849cd45511105, ASSIGN; state=SPLITTING_NEW, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:10:49,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T06:10:49,480 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T06:10:49,481 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=1e17f5e1db604aa0171849cd45511105, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:10:49,481 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=74fccf93b2a8460b5626d9affabd96b1, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:10:49,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1e17f5e1db604aa0171849cd45511105, ASSIGN because future has completed 2024-12-11T06:10:49,485 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1e17f5e1db604aa0171849cd45511105, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:10:49,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=74fccf93b2a8460b5626d9affabd96b1, ASSIGN because future has completed 2024-12-11T06:10:49,488 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 74fccf93b2a8460b5626d9affabd96b1, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:10:49,646 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. 2024-12-11T06:10:49,647 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => 1e17f5e1db604aa0171849cd45511105, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105.', STARTKEY => '5', ENDKEY => ''} 2024-12-11T06:10:49,647 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. service=AccessControlService 2024-12-11T06:10:49,648 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:10:49,648 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:49,648 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:49,648 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:49,648 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:49,651 INFO [StoreOpener-1e17f5e1db604aa0171849cd45511105-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:49,652 INFO [StoreOpener-1e17f5e1db604aa0171849cd45511105-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1e17f5e1db604aa0171849cd45511105 columnFamilyName cf 2024-12-11T06:10:49,653 DEBUG [StoreOpener-1e17f5e1db604aa0171849cd45511105-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:49,668 DEBUG [StoreFileOpener-1e17f5e1db604aa0171849cd45511105-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597: NONE, but ROW specified in column family configuration 2024-12-11T06:10:49,689 DEBUG [StoreOpener-1e17f5e1db604aa0171849cd45511105-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597->hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_-top 2024-12-11T06:10:49,690 INFO [StoreOpener-1e17f5e1db604aa0171849cd45511105-1 {}] regionserver.HStore(327): Store=1e17f5e1db604aa0171849cd45511105/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:10:49,690 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:49,691 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:49,694 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:49,694 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:49,694 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:49,697 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:49,699 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened 1e17f5e1db604aa0171849cd45511105; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67160762, jitterRate=7.733404636383057E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:10:49,699 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:49,700 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for 1e17f5e1db604aa0171849cd45511105: Running coprocessor pre-open hook at 1733897449649Writing region info on filesystem at 1733897449649Initializing all the Stores at 1733897449650 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897449650Cleaning up temporary data from old regions at 1733897449694 (+44 ms)Running coprocessor post-open hooks at 1733897449699 (+5 ms)Region opened successfully at 1733897449700 (+1 ms) 2024-12-11T06:10:49,702 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105., pid=26, masterSystemTime=1733897449640 2024-12-11T06:10:49,702 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105.,because compaction is disabled. 2024-12-11T06:10:49,706 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. 2024-12-11T06:10:49,706 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. 2024-12-11T06:10:49,706 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. 2024-12-11T06:10:49,706 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => 74fccf93b2a8460b5626d9affabd96b1, NAME => 'testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1.', STARTKEY => '', ENDKEY => '5'} 2024-12-11T06:10:49,706 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=1e17f5e1db604aa0171849cd45511105, regionState=OPEN, openSeqNum=7, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:10:49,706 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. service=AccessControlService 2024-12-11T06:10:49,707 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:10:49,707 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,707 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:10:49,707 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,707 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1e17f5e1db604aa0171849cd45511105, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:10:49,712 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=b66397bb9319,42579,1733897428663, table=testExportFileSystemStateWithSplitRegion, region=1e17f5e1db604aa0171849cd45511105. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-11T06:10:49,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=25 2024-12-11T06:10:49,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure 1e17f5e1db604aa0171849cd45511105, server=b66397bb9319,42579,1733897428663 in 228 msec 2024-12-11T06:10:49,717 INFO [StoreOpener-74fccf93b2a8460b5626d9affabd96b1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,719 INFO [StoreOpener-74fccf93b2a8460b5626d9affabd96b1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 74fccf93b2a8460b5626d9affabd96b1 columnFamilyName cf 2024-12-11T06:10:49,720 DEBUG [StoreOpener-74fccf93b2a8460b5626d9affabd96b1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:49,722 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1e17f5e1db604aa0171849cd45511105, ASSIGN in 412 msec 2024-12-11T06:10:49,742 DEBUG [StoreFileOpener-74fccf93b2a8460b5626d9affabd96b1-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597: NONE, but ROW specified in column family configuration 2024-12-11T06:10:49,750 DEBUG [StoreOpener-74fccf93b2a8460b5626d9affabd96b1-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597->hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_-bottom 2024-12-11T06:10:49,750 INFO [StoreOpener-74fccf93b2a8460b5626d9affabd96b1-1 {}] regionserver.HStore(327): Store=74fccf93b2a8460b5626d9affabd96b1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:10:49,750 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,752 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): 
Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,754 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,754 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,754 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,757 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,758 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened 74fccf93b2a8460b5626d9affabd96b1; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64543398, jitterRate=-0.03822842240333557}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:10:49,758 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,759 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for 74fccf93b2a8460b5626d9affabd96b1: Running coprocessor pre-open hook at 1733897449708Writing region info on filesystem at 1733897449708Initializing all the Stores at 1733897449709 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897449709Cleaning up temporary data from old regions at 1733897449754 (+45 ms)Running coprocessor post-open hooks at 1733897449758 (+4 ms)Region opened successfully at 1733897449758 2024-12-11T06:10:49,760 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1., pid=27, masterSystemTime=1733897449640 2024-12-11T06:10:49,760 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1.,because compaction is disabled. 
2024-12-11T06:10:49,766 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. 2024-12-11T06:10:49,766 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. 2024-12-11T06:10:49,766 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=74fccf93b2a8460b5626d9affabd96b1, regionState=OPEN, openSeqNum=7, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:10:49,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 74fccf93b2a8460b5626d9affabd96b1, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:10:49,776 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=24 2024-12-11T06:10:49,776 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure 74fccf93b2a8460b5626d9affabd96b1, server=b66397bb9319,42579,1733897428663 in 284 msec 2024-12-11T06:10:49,780 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=24, resume processing ppid=21 2024-12-11T06:10:49,780 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=74fccf93b2a8460b5626d9affabd96b1, ASSIGN in 471 msec 2024-12-11T06:10:49,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=1f7b173781412d5e682785e48ae8f597, daughterA=74fccf93b2a8460b5626d9affabd96b1, daughterB=1e17f5e1db604aa0171849cd45511105 in 1.0780 sec 2024-12-11T06:10:49,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-11T06:10:49,868 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-11T06:10:49,868 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T06:10:49,875 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T06:10:49,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897449875 (current time:1733897449875). 
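The snapshot request logged above (ss=snapshot-testExportFileSystemStateWithSplitRegion, type=FLUSH) corresponds to a single blocking Admin call on the client side. A minimal sketch, with only the snapshot and table names taken from this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master finishes taking the FLUSH snapshot of the online table.
      admin.snapshot("snapshot-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
    }
  }
}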
2024-12-11T06:10:49,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:10:49,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-11T06:10:49,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:10:49,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ea7ba78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:49,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:10:49,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:10:49,880 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:10:49,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:10:49,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:10:49,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3631299d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:49,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:10:49,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:10:49,881 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:49,883 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46814, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:10:49,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@518cdc65, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:49,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:10:49,885 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:10:49,886 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:49,887 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38978, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:10:49,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 2024-12-11T06:10:49,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:10:49,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:49,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:49,891 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:10:49,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@338a5eff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:49,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:10:49,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:10:49,898 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:10:49,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:10:49,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:10:49,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@719ea73c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:49,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:10:49,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:10:49,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:49,902 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46820, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:10:49,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79497e7d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:10:49,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:10:49,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:10:49,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:10:49,910 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38992, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T06:10:49,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:10:49,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 2024-12-11T06:10:49,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:10:49,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:49,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:10:49,919 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:10:49,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-11T06:10:49,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T06:10:49,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-11T06:10:49,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-11T06:10:49,925 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:10:49,926 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:10:49,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T06:10:49,937 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:10:49,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741868_1044 (size=197) 2024-12-11T06:10:49,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741868_1044 (size=197) 2024-12-11T06:10:49,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741868_1044 (size=197) 2024-12-11T06:10:49,980 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:10:49,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
74fccf93b2a8460b5626d9affabd96b1}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1e17f5e1db604aa0171849cd45511105}] 2024-12-11T06:10:49,982 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:49,983 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:50,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T06:10:50,136 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-11T06:10:50,136 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-11T06:10:50,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. 2024-12-11T06:10:50,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for 74fccf93b2a8460b5626d9affabd96b1: 2024-12-11T06:10:50,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. 2024-12-11T06:10:50,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-11T06:10:50,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for 1e17f5e1db604aa0171849cd45511105: 2024-12-11T06:10:50,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:50,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. for snapshot-testExportFileSystemStateWithSplitRegion completed. 
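
The SnapshotManifest entries above show that the daughter regions of the split (74fccf93b2a8460b5626d9affabd96b1 and 1e17f5e1db604aa0171849cd45511105) contribute reference files rather than copies of the data: each stores the parent hfile name suffixed with "." plus the parent's encoded region name, resolving to the -bottom or -top half of the parent's hfile. A tiny illustration of that naming pattern, built only from the identifiers in these entries (illustration, not HBase API code):

    public class SplitReferenceNameSketch {
      public static void main(String[] args) {
        // Identifiers taken verbatim from the manifest entries above.
        String parentHFile = "8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_";
        String parentEncodedRegion = "1f7b173781412d5e682785e48ae8f597";
        // Each daughter region (74fccf93b2a8460b5626d9affabd96b1 -> "-bottom" half,
        // 1e17f5e1db604aa0171849cd45511105 -> "-top" half) stores this reference name
        // in its cf/ directory instead of a copy of the parent hfile.
        String daughterReference = parentHFile + "." + parentEncodedRegion;
        System.out.println(daughterReference);
        // Prints: 8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597
      }
    }
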
2024-12-11T06:10:50,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:10:50,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:50,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:10:50,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597->hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_-bottom] hfiles 2024-12-11T06:10:50,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597->hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_-top] hfiles 2024-12-11T06:10:50,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:50,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:50,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741870_1046 (size=182) 2024-12-11T06:10:50,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741869_1045 (size=182) 2024-12-11T06:10:50,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to 
blk_1073741870_1046 (size=182) 2024-12-11T06:10:50,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741870_1046 (size=182) 2024-12-11T06:10:50,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741869_1045 (size=182) 2024-12-11T06:10:50,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741869_1045 (size=182) 2024-12-11T06:10:50,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. 2024-12-11T06:10:50,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-11T06:10:50,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. 2024-12-11T06:10:50,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-11T06:10:50,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-11T06:10:50,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:50,163 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:10:50,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-11T06:10:50,164 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:50,165 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:10:50,168 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 74fccf93b2a8460b5626d9affabd96b1 in 185 msec 2024-12-11T06:10:50,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=28 2024-12-11T06:10:50,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1e17f5e1db604aa0171849cd45511105 in 186 msec 2024-12-11T06:10:50,170 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ 
ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:10:50,173 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-11T06:10:50,173 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:10:50,174 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:10:50,175 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_] hfiles 2024-12-11T06:10:50,175 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_ 2024-12-11T06:10:50,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741871_1047 (size=129) 2024-12-11T06:10:50,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741871_1047 (size=129) 2024-12-11T06:10:50,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741871_1047 (size=129) 2024-12-11T06:10:50,193 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 1f7b173781412d5e682785e48ae8f597, NAME => 'testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:50,195 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:10:50,196 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:10:50,196 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:50,198 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:50,225 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741872_1048 (size=891) 2024-12-11T06:10:50,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741872_1048 (size=891) 2024-12-11T06:10:50,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741872_1048 (size=891) 2024-12-11T06:10:50,241 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:10:50,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T06:10:50,253 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:10:50,254 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:50,256 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:10:50,256 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-11T06:10:50,260 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 335 msec 2024-12-11T06:10:50,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-11T06:10:50,557 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T06:10:50,557 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897450557 2024-12-11T06:10:50,558 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:42173, 
tgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897450557, rawTgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897450557, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:10:50,601 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:10:50,601 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897450557, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897450557/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:50,606 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T06:10:50,614 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897450557/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:10:50,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741873_1049 (size=891) 2024-12-11T06:10:50,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741873_1049 (size=891) 2024-12-11T06:10:50,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741873_1049 (size=891) 2024-12-11T06:10:50,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741874_1050 (size=197) 2024-12-11T06:10:50,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741874_1050 (size=197) 2024-12-11T06:10:50,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741874_1050 (size=197) 2024-12-11T06:10:50,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:10:50,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:10:50,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:10:51,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-10912204581526387188.jar 2024-12-11T06:10:51,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:10:51,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:10:51,957 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-18013449970054984326.jar 2024-12-11T06:10:51,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:10:51,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:10:51,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:10:51,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:10:51,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:10:51,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:10:51,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T06:10:51,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T06:10:51,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T06:10:51,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T06:10:51,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T06:10:51,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T06:10:51,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T06:10:51,964 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T06:10:51,964 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T06:10:51,965 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T06:10:51,965 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T06:10:51,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:10:51,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:10:51,969 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:10:51,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:10:51,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:10:51,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:10:51,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:10:52,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741875_1051 (size=131440) 2024-12-11T06:10:52,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741875_1051 (size=131440) 2024-12-11T06:10:52,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741875_1051 (size=131440) 2024-12-11T06:10:52,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741876_1052 (size=6426220) 2024-12-11T06:10:52,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741876_1052 (size=6426220) 2024-12-11T06:10:52,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741876_1052 (size=6426220) 2024-12-11T06:10:52,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741877_1053 (size=4188619) 2024-12-11T06:10:52,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741877_1053 (size=4188619) 2024-12-11T06:10:52,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741877_1053 (size=4188619) 2024-12-11T06:10:52,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741878_1054 (size=1323991) 2024-12-11T06:10:52,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741878_1054 (size=1323991) 2024-12-11T06:10:52,300 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741878_1054 (size=1323991) 2024-12-11T06:10:52,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741879_1055 (size=903542) 2024-12-11T06:10:52,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741879_1055 (size=903542) 2024-12-11T06:10:52,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741879_1055 (size=903542) 2024-12-11T06:10:52,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741880_1056 (size=8360360) 2024-12-11T06:10:52,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741880_1056 (size=8360360) 2024-12-11T06:10:52,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741880_1056 (size=8360360) 2024-12-11T06:10:52,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741881_1057 (size=1877034) 2024-12-11T06:10:52,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741881_1057 (size=1877034) 2024-12-11T06:10:52,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741881_1057 (size=1877034) 2024-12-11T06:10:52,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741882_1058 (size=77835) 2024-12-11T06:10:52,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741882_1058 (size=77835) 2024-12-11T06:10:52,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741882_1058 (size=77835) 2024-12-11T06:10:52,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741883_1059 (size=30949) 2024-12-11T06:10:52,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741883_1059 (size=30949) 2024-12-11T06:10:52,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741883_1059 (size=30949) 2024-12-11T06:10:52,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741884_1060 (size=1597213) 2024-12-11T06:10:52,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741884_1060 (size=1597213) 2024-12-11T06:10:52,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741884_1060 (size=1597213) 2024-12-11T06:10:52,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741885_1061 (size=4695811) 2024-12-11T06:10:52,539 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741885_1061 (size=4695811) 2024-12-11T06:10:52,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741885_1061 (size=4695811) 2024-12-11T06:10:52,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741886_1062 (size=232957) 2024-12-11T06:10:52,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741886_1062 (size=232957) 2024-12-11T06:10:52,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741886_1062 (size=232957) 2024-12-11T06:10:52,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741887_1063 (size=127628) 2024-12-11T06:10:52,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741887_1063 (size=127628) 2024-12-11T06:10:52,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741887_1063 (size=127628) 2024-12-11T06:10:52,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741888_1064 (size=20406) 2024-12-11T06:10:52,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741888_1064 (size=20406) 2024-12-11T06:10:52,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741888_1064 (size=20406) 2024-12-11T06:10:52,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741889_1065 (size=5175431) 2024-12-11T06:10:52,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741889_1065 (size=5175431) 2024-12-11T06:10:52,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741889_1065 (size=5175431) 2024-12-11T06:10:52,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741890_1066 (size=217634) 2024-12-11T06:10:52,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741890_1066 (size=217634) 2024-12-11T06:10:52,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741890_1066 (size=217634) 2024-12-11T06:10:52,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741891_1067 (size=443172) 2024-12-11T06:10:52,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741891_1067 (size=443172) 2024-12-11T06:10:52,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741891_1067 (size=443172) 2024-12-11T06:10:52,690 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741892_1068 (size=1832290) 2024-12-11T06:10:52,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741892_1068 (size=1832290) 2024-12-11T06:10:52,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741892_1068 (size=1832290) 2024-12-11T06:10:52,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741893_1069 (size=322274) 2024-12-11T06:10:52,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741893_1069 (size=322274) 2024-12-11T06:10:52,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741893_1069 (size=322274) 2024-12-11T06:10:52,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741894_1070 (size=503880) 2024-12-11T06:10:52,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741894_1070 (size=503880) 2024-12-11T06:10:52,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741894_1070 (size=503880) 2024-12-11T06:10:52,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741895_1071 (size=29229) 2024-12-11T06:10:52,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741895_1071 (size=29229) 2024-12-11T06:10:52,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741895_1071 (size=29229) 2024-12-11T06:10:52,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741896_1072 (size=24096) 2024-12-11T06:10:52,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741896_1072 (size=24096) 2024-12-11T06:10:52,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741896_1072 (size=24096) 2024-12-11T06:10:52,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741897_1073 (size=111872) 2024-12-11T06:10:52,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741897_1073 (size=111872) 2024-12-11T06:10:52,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741897_1073 (size=111872) 2024-12-11T06:10:52,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741898_1074 (size=45609) 2024-12-11T06:10:52,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741898_1074 (size=45609) 2024-12-11T06:10:52,789 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741898_1074 (size=45609) 2024-12-11T06:10:52,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741899_1075 (size=136454) 2024-12-11T06:10:52,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741899_1075 (size=136454) 2024-12-11T06:10:52,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741899_1075 (size=136454) 2024-12-11T06:10:52,809 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T06:10:52,814 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-11T06:10:52,820 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=1f7b173781412d5e682785e48ae8f597-8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_. 2024-12-11T06:10:52,820 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=1f7b173781412d5e682785e48ae8f597-8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_. 2024-12-11T06:10:52,820 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-11T06:10:52,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741900_1076 (size=244) 2024-12-11T06:10:52,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741900_1076 (size=244) 2024-12-11T06:10:52,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741900_1076 (size=244) 2024-12-11T06:10:52,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741901_1077 (size=17) 2024-12-11T06:10:52,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741901_1077 (size=17) 2024-12-11T06:10:52,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741901_1077 (size=17) 2024-12-11T06:10:52,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741902_1078 (size=304054) 2024-12-11T06:10:52,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741902_1078 (size=304054) 2024-12-11T06:10:52,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741902_1078 (size=304054) 2024-12-11T06:10:53,360 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T06:10:53,360 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:10:53,753 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0001_000001 (auth:SIMPLE) from 127.0.0.1:48724 2024-12-11T06:10:54,445 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:10:55,085 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-11T06:10:56,287 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T06:11:03,014 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0001_000001 (auth:SIMPLE) from 127.0.0.1:41434 2024-12-11T06:11:03,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741903_1079 (size=349752) 2024-12-11T06:11:03,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741903_1079 (size=349752) 2024-12-11T06:11:03,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741903_1079 (size=349752) 2024-12-11T06:11:05,424 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0001_000001 (auth:SIMPLE) from 127.0.0.1:56388 2024-12-11T06:11:13,968 INFO [master/b66397bb9319:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-11T06:11:13,968 INFO [master/b66397bb9319:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-11T06:11:24,294 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1194fc3df4ce2c61f3cc6db1a5cd3843, had cached 0 bytes from a total of 5978 2024-12-11T06:11:24,343 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ec1b979a6a9bb8061812d28abf727fcd, had cached 0 bytes from a total of 15719 2024-12-11T06:11:26,287 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
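
At this point the snapshot has completed, the "For class ..., using jar ..." lines earlier record TableMapReduceUtil locating a jar for each class the job needs (typically driven by TableMapReduceUtil.addDependencyJars), and ExportSnapshot has submitted the MapReduce copy job (application_1733897436125_0001); the sparse entries that follow are the mappers copying the 305.6 M of hfile data. A hedged sketch of how such an export is typically launched via ToolRunner, reusing the snapshot name from this log with an illustrative destination URI and assuming the documented -snapshot/-copy-to options; this is not the test's own driver code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Shell equivalent (destination URI is illustrative):
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snapshot-testExportFileSystemStateWithSplitRegion \
        //     -copy-to hdfs://namenode:8020/hbase-export
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
            "-copy-to", "hdfs://namenode:8020/hbase-export"
        });
        System.exit(rc);
      }
    }
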
2024-12-11T06:11:31,858 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 0c6631449afb0062d10d83de4eb96abd changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:11:31,859 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1194fc3df4ce2c61f3cc6db1a5cd3843 changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:11:31,859 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ec1b979a6a9bb8061812d28abf727fcd changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:11:34,648 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1e17f5e1db604aa0171849cd45511105, had cached 0 bytes from a total of 320414712 2024-12-11T06:11:34,707 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 74fccf93b2a8460b5626d9affabd96b1, had cached 0 bytes from a total of 320414712 2024-12-11T06:11:49,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741904_1080 (size=134217728) 2024-12-11T06:11:49,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741904_1080 (size=134217728) 2024-12-11T06:11:49,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741904_1080 (size=134217728) 2024-12-11T06:11:56,287 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T06:12:09,294 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1194fc3df4ce2c61f3cc6db1a5cd3843, had cached 0 bytes from a total of 5978 2024-12-11T06:12:09,344 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ec1b979a6a9bb8061812d28abf727fcd, had cached 0 bytes from a total of 15719 2024-12-11T06:12:19,649 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1e17f5e1db604aa0171849cd45511105, had cached 0 bytes from a total of 320414712 2024-12-11T06:12:19,708 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 74fccf93b2a8460b5626d9affabd96b1, had cached 0 bytes from a total of 320414712 2024-12-11T06:12:26,287 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T06:12:26,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741905_1081 (size=134217728) 2024-12-11T06:12:26,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741905_1081 (size=134217728) 2024-12-11T06:12:26,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741905_1081 (size=134217728) 2024-12-11T06:12:30,710 WARN [regionserver/b66397bb9319:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 4, running: 1 2024-12-11T06:12:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741906_1082 (size=51979256) 2024-12-11T06:12:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741906_1082 (size=51979256) 2024-12-11T06:12:41,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741906_1082 (size=51979256) 2024-12-11T06:12:41,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741907_1083 (size=17520) 2024-12-11T06:12:41,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741907_1083 (size=17520) 2024-12-11T06:12:41,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741907_1083 (size=17520) 2024-12-11T06:12:41,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741908_1084 (size=482) 2024-12-11T06:12:41,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741908_1084 (size=482) 2024-12-11T06:12:41,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741908_1084 (size=482) 2024-12-11T06:12:41,190 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0001/container_1733897436125_0001_01_000002/launch_container.sh] 2024-12-11T06:12:41,191 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0001/container_1733897436125_0001_01_000002/container_tokens] 2024-12-11T06:12:41,191 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0001/container_1733897436125_0001_01_000002/sysfs] 2024-12-11T06:12:41,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741909_1085 (size=17520) 2024-12-11T06:12:41,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741909_1085 (size=17520) 2024-12-11T06:12:41,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741909_1085 (size=17520) 2024-12-11T06:12:41,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741910_1086 (size=349752) 2024-12-11T06:12:41,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741910_1086 (size=349752) 2024-12-11T06:12:41,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741910_1086 (size=349752) 2024-12-11T06:12:41,232 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0001_000001 (auth:SIMPLE) from 127.0.0.1:55630 2024-12-11T06:12:43,083 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T06:12:43,085 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
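The ExportSnapshot entries above (loading the hfile list for 'snapshot-testExportFileSystemStateWithSplitRegion', computing a single ~305.6 MB split, then finalizing and verifying the export) correspond to the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool. A minimal sketch of driving the same kind of export programmatically, assuming the snapshot name from the log and a placeholder destination URI (the test's real destination is a generated test-data directory, not this path):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Copy the snapshot's manifest and referenced hfiles to another filesystem.
            // The -copy-to URI is a placeholder, not the path used by this test run.
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
                "-copy-to", "hdfs://backup-cluster:8020/hbase",
                "-mappers", "1"
            });
            System.exit(rc);
        }
    }

The tool submits a MapReduce job to copy the referenced files, which is why the surrounding log shows YARN container and job-resource activity around the export.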
2024-12-11T06:12:43,094 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,094 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T06:12:43,095 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T06:12:43,095 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,096 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-11T06:12:43,096 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-11T06:12:43,096 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897450557/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897450557/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,097 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897450557/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-11T06:12:43,097 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897450557/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-11T06:12:43,111 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-11T06:12:43,120 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897563120"}]},"ts":"1733897563120"} 2024-12-11T06:12:43,123 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 
2024-12-11T06:12:43,123 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-11T06:12:43,125 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-11T06:12:43,129 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=74fccf93b2a8460b5626d9affabd96b1, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1e17f5e1db604aa0171849cd45511105, UNASSIGN}] 2024-12-11T06:12:43,131 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1e17f5e1db604aa0171849cd45511105, UNASSIGN 2024-12-11T06:12:43,131 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=74fccf93b2a8460b5626d9affabd96b1, UNASSIGN 2024-12-11T06:12:43,132 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=1e17f5e1db604aa0171849cd45511105, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:12:43,132 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=74fccf93b2a8460b5626d9affabd96b1, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:12:43,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=74fccf93b2a8460b5626d9affabd96b1, UNASSIGN because future has completed 2024-12-11T06:12:43,136 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:12:43,136 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 74fccf93b2a8460b5626d9affabd96b1, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:12:43,136 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1e17f5e1db604aa0171849cd45511105, UNASSIGN because future has completed 2024-12-11T06:12:43,137 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:12:43,137 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1e17f5e1db604aa0171849cd45511105, server=b66397bb9319,42579,1733897428663}] 
2024-12-11T06:12:43,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-11T06:12:43,289 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:12:43,289 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:12:43,289 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 74fccf93b2a8460b5626d9affabd96b1, disabling compactions & flushes 2024-12-11T06:12:43,289 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. 2024-12-11T06:12:43,289 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. 2024-12-11T06:12:43,289 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. after waiting 0 ms 2024-12-11T06:12:43,289 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. 2024-12-11T06:12:43,296 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-11T06:12:43,296 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:12:43,296 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1. 
2024-12-11T06:12:43,296 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 74fccf93b2a8460b5626d9affabd96b1: Waiting for close lock at 1733897563289Running coprocessor pre-close hooks at 1733897563289Disabling compacts and flushes for region at 1733897563289Disabling writes for close at 1733897563289Writing region close event to WAL at 1733897563290 (+1 ms)Running coprocessor post-close hooks at 1733897563296 (+6 ms)Closed at 1733897563296 2024-12-11T06:12:43,299 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:12:43,299 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:12:43,299 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:12:43,299 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing 1e17f5e1db604aa0171849cd45511105, disabling compactions & flushes 2024-12-11T06:12:43,299 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. 2024-12-11T06:12:43,299 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. 2024-12-11T06:12:43,299 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. after waiting 0 ms 2024-12-11T06:12:43,299 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. 2024-12-11T06:12:43,300 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=74fccf93b2a8460b5626d9affabd96b1, regionState=CLOSED 2024-12-11T06:12:43,304 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-11T06:12:43,305 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:12:43,305 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105. 
2024-12-11T06:12:43,306 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for 1e17f5e1db604aa0171849cd45511105: Waiting for close lock at 1733897563299Running coprocessor pre-close hooks at 1733897563299Disabling compacts and flushes for region at 1733897563299Disabling writes for close at 1733897563299Writing region close event to WAL at 1733897563300 (+1 ms)Running coprocessor post-close hooks at 1733897563305 (+5 ms)Closed at 1733897563305 2024-12-11T06:12:43,309 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 74fccf93b2a8460b5626d9affabd96b1, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:12:43,309 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed 1e17f5e1db604aa0171849cd45511105 2024-12-11T06:12:43,309 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=1e17f5e1db604aa0171849cd45511105, regionState=CLOSED 2024-12-11T06:12:43,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1e17f5e1db604aa0171849cd45511105, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:12:43,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-12-11T06:12:43,319 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure 74fccf93b2a8460b5626d9affabd96b1, server=b66397bb9319,42579,1733897428663 in 180 msec 2024-12-11T06:12:43,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=34 2024-12-11T06:12:43,321 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=74fccf93b2a8460b5626d9affabd96b1, UNASSIGN in 190 msec 2024-12-11T06:12:43,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 1e17f5e1db604aa0171849cd45511105, server=b66397bb9319,42579,1733897428663 in 181 msec 2024-12-11T06:12:43,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-12-11T06:12:43,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=1e17f5e1db604aa0171849cd45511105, UNASSIGN in 192 msec 2024-12-11T06:12:43,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-11T06:12:43,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 201 msec 2024-12-11T06:12:43,329 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897563329"}]},"ts":"1733897563329"} 2024-12-11T06:12:43,331 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-11T06:12:43,331 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-11T06:12:43,334 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 219 msec 2024-12-11T06:12:43,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-11T06:12:43,437 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T06:12:43,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,448 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,449 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,453 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52395, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-11T06:12:43,454 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T06:12:43,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T06:12:43,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T06:12:43,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T06:12:43,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:43,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:43,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:43,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:43,461 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597 
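The procedure entries above and below trace a DisableTableProcedure (pid=31) followed by a DeleteTableProcedure (pid=37) for testExportFileSystemStateWithSplitRegion: regions are unassigned and closed, their files are moved to the archive by HFileArchiver, and the table's meta rows, descriptor, and ACL znodes are cleaned up. A minimal sketch of the client-side Admin calls that trigger this sequence, assuming a reachable cluster configuration (connection details come from the local configuration, not from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
                // A table must be disabled before it can be deleted; the master runs
                // DisableTableProcedure and then DeleteTableProcedure for these calls.
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);
                }
                admin.deleteTable(table);
            }
        }
    }

The same pair of calls for testtb-testExportFileSystemStateWithSplitRegion produces the second disable/delete sequence that follows later in this log.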
2024-12-11T06:12:43,462 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:12:43,462 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:12:43,463 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:12:43,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-11T06:12:43,464 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:12:43,464 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:12:43,466 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/recovered.edits] 2024-12-11T06:12:43,466 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/recovered.edits] 2024-12-11T06:12:43,468 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105 2024-12-11T06:12:43,471 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/recovered.edits] 2024-12-11T06:12:43,476 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_ to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_ 2024-12-11T06:12:43,476 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597 2024-12-11T06:12:43,477 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/cf/8972435da1724dd0941aa3bf5fae7ba8_SeqId_4_.1f7b173781412d5e682785e48ae8f597 2024-12-11T06:12:43,481 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/recovered.edits/10.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1/recovered.edits/10.seqid 2024-12-11T06:12:43,482 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/74fccf93b2a8460b5626d9affabd96b1 2024-12-11T06:12:43,483 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/recovered.edits/6.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597/recovered.edits/6.seqid 2024-12-11T06:12:43,483 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1f7b173781412d5e682785e48ae8f597 2024-12-11T06:12:43,489 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/recovered.edits/10.seqid to 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105/recovered.edits/10.seqid 2024-12-11T06:12:43,490 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportFileSystemStateWithSplitRegion/1e17f5e1db604aa0171849cd45511105 2024-12-11T06:12:43,490 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-11T06:12:43,493 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-11T06:12:43,506 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-11T06:12:43,510 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-11T06:12:43,512 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,512 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 2024-12-11T06:12:43,513 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897563512"}]},"ts":"9223372036854775807"} 2024-12-11T06:12:43,513 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897563512"}]},"ts":"9223372036854775807"} 2024-12-11T06:12:43,513 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897563512"}]},"ts":"9223372036854775807"} 2024-12-11T06:12:43,517 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-12-11T06:12:43,517 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 1f7b173781412d5e682785e48ae8f597, NAME => 'testExportFileSystemStateWithSplitRegion,,1733897441877.1f7b173781412d5e682785e48ae8f597.', STARTKEY => '', ENDKEY => ''}, {ENCODED => 74fccf93b2a8460b5626d9affabd96b1, NAME => 'testExportFileSystemStateWithSplitRegion,,1733897448699.74fccf93b2a8460b5626d9affabd96b1.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => 1e17f5e1db604aa0171849cd45511105, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733897448699.1e17f5e1db604aa0171849cd45511105.', STARTKEY => '5', ENDKEY => ''}] 2024-12-11T06:12:43,517 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): 
Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 2024-12-11T06:12:43,517 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897563517"}]},"ts":"9223372036854775807"} 2024-12-11T06:12:43,520 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-11T06:12:43,521 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 79 msec 2024-12-11T06:12:43,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-11T06:12:43,568 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,569 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-11T06:12:43,569 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-11T06:12:43,574 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897563573"}]},"ts":"1733897563573"} 2024-12-11T06:12:43,576 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-11T06:12:43,576 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-11T06:12:43,577 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-11T06:12:43,579 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1194fc3df4ce2c61f3cc6db1a5cd3843, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ec1b979a6a9bb8061812d28abf727fcd, UNASSIGN}] 2024-12-11T06:12:43,580 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1194fc3df4ce2c61f3cc6db1a5cd3843, UNASSIGN 2024-12-11T06:12:43,580 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ec1b979a6a9bb8061812d28abf727fcd, UNASSIGN 2024-12-11T06:12:43,581 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=1194fc3df4ce2c61f3cc6db1a5cd3843, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:12:43,581 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=ec1b979a6a9bb8061812d28abf727fcd, regionState=CLOSING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:12:43,583 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1194fc3df4ce2c61f3cc6db1a5cd3843, UNASSIGN because future has completed 2024-12-11T06:12:43,584 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:12:43,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:12:43,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ec1b979a6a9bb8061812d28abf727fcd, UNASSIGN because future has completed 2024-12-11T06:12:43,585 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:12:43,586 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure ec1b979a6a9bb8061812d28abf727fcd, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:12:43,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-11T06:12:43,738 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60009, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T06:12:43,739 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60169, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T06:12:43,739 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:12:43,739 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, 
pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:12:43,739 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 1194fc3df4ce2c61f3cc6db1a5cd3843, disabling compactions & flushes 2024-12-11T06:12:43,739 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:12:43,740 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:12:43,740 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. after waiting 0 ms 2024-12-11T06:12:43,740 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:12:43,740 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:12:43,741 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:12:43,741 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing ec1b979a6a9bb8061812d28abf727fcd, disabling compactions & flushes 2024-12-11T06:12:43,741 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:12:43,741 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:12:43,741 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. after waiting 0 ms 2024-12-11T06:12:43,741 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 
2024-12-11T06:12:43,746 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:12:43,746 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:12:43,747 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:12:43,747 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:12:43,747 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843. 2024-12-11T06:12:43,747 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd. 2024-12-11T06:12:43,747 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for ec1b979a6a9bb8061812d28abf727fcd: Waiting for close lock at 1733897563741Running coprocessor pre-close hooks at 1733897563741Disabling compacts and flushes for region at 1733897563741Disabling writes for close at 1733897563741Writing region close event to WAL at 1733897563742 (+1 ms)Running coprocessor post-close hooks at 1733897563747 (+5 ms)Closed at 1733897563747 2024-12-11T06:12:43,747 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 1194fc3df4ce2c61f3cc6db1a5cd3843: Waiting for close lock at 1733897563739Running coprocessor pre-close hooks at 1733897563739Disabling compacts and flushes for region at 1733897563739Disabling writes for close at 1733897563740 (+1 ms)Writing region close event to WAL at 1733897563741 (+1 ms)Running coprocessor post-close hooks at 1733897563747 (+6 ms)Closed at 1733897563747 2024-12-11T06:12:43,749 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:12:43,750 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=ec1b979a6a9bb8061812d28abf727fcd, regionState=CLOSED 2024-12-11T06:12:43,750 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:12:43,751 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=1194fc3df4ce2c61f3cc6db1a5cd3843, regionState=CLOSED 2024-12-11T06:12:43,752 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure ec1b979a6a9bb8061812d28abf727fcd, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:12:43,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:12:43,756 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=41 2024-12-11T06:12:43,756 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure ec1b979a6a9bb8061812d28abf727fcd, server=b66397bb9319,44539,1733897428597 in 167 msec 2024-12-11T06:12:43,758 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=40 2024-12-11T06:12:43,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=ec1b979a6a9bb8061812d28abf727fcd, UNASSIGN in 177 msec 2024-12-11T06:12:43,758 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 1194fc3df4ce2c61f3cc6db1a5cd3843, server=b66397bb9319,35939,1733897428441 in 171 msec 2024-12-11T06:12:43,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=39 2024-12-11T06:12:43,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=1194fc3df4ce2c61f3cc6db1a5cd3843, UNASSIGN in 179 msec 2024-12-11T06:12:43,771 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-11T06:12:43,771 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 189 msec 2024-12-11T06:12:43,773 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897563772"}]},"ts":"1733897563772"} 2024-12-11T06:12:43,775 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-11T06:12:43,775 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-11T06:12:43,778 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 207 msec 2024-12-11T06:12:43,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-11T06:12:43,888 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, 
Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T06:12:43,889 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,891 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,892 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,895 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,897 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:12:43,897 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:12:43,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,900 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T06:12:43,900 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T06:12:43,900 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T06:12:43,900 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-11T06:12:43,901 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/recovered.edits] 2024-12-11T06:12:43,901 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/recovered.edits] 2024-12-11T06:12:43,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:43,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:43,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:43,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:43,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-11T06:12:43,907 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/cf/03e49046fb6e444aa3afe1c538aaf1c7 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/cf/03e49046fb6e444aa3afe1c538aaf1c7 2024-12-11T06:12:43,907 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/cf/a5b513fded4e401b9e073d63adbac34f to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/cf/a5b513fded4e401b9e073d63adbac34f 2024-12-11T06:12:43,911 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd/recovered.edits/9.seqid 2024-12-11T06:12:43,911 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843/recovered.edits/9.seqid 2024-12-11T06:12:43,911 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:12:43,912 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSplitRegion/1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:12:43,912 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-11T06:12:43,913 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-11T06:12:43,914 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf] 2024-12-11T06:12:43,919 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024121173d1525b955648378c528dc9ff4a2da1_1194fc3df4ce2c61f3cc6db1a5cd3843 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e2024121173d1525b955648378c528dc9ff4a2da1_1194fc3df4ce2c61f3cc6db1a5cd3843 2024-12-11T06:12:43,919 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241211c6868404bff9486b8063ddc1b4587199_ec1b979a6a9bb8061812d28abf727fcd to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241211c6868404bff9486b8063ddc1b4587199_ec1b979a6a9bb8061812d28abf727fcd 2024-12-11T06:12:43,920 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-11T06:12:43,922 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,925 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-11T06:12:43,928 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-11T06:12:43,929 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,930 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 
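For reference, the DISABLE operation that completed above, the DELETE procedure being executed here, and the snapshot deletions that follow are the usual post-test cleanup sequence; driven through the HBase Admin client API, an equivalent sequence might look roughly like the sketch below (a minimal, illustrative sketch only: class and variable names are made up for this example, the table and snapshot names are taken from this run, and a reachable cluster configuration is assumed).

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class ExportSnapshotTestCleanupSketch {
    public static void main(String[] args) throws Exception {
      // Illustrative only: assumes hbase-site.xml on the classpath points at the test cluster.
      Configuration conf = HBaseConfiguration.create();
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Disable, then delete the test table (mirrors the DisableTableProcedure and
        // DeleteTableProcedure traced in this log).
        if (admin.tableExists(table)) {
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);
          }
          admin.deleteTable(table);
        }
        // Remove the snapshots created during the test (names as recorded below).
        admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion");
        admin.deleteSnapshot("snapshot-testExportFileSystemStateWithSplitRegion");
        admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSplitRegion");
      }
    }
  }

Each Admin call above blocks until the corresponding master procedure finishes, which is why the log shows the client polling "Checking to see if procedure is done" until the DISABLE and DELETE operations report completed.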
2024-12-11T06:12:43,930 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897563930"}]},"ts":"9223372036854775807"} 2024-12-11T06:12:43,930 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897563930"}]},"ts":"9223372036854775807"} 2024-12-11T06:12:43,932 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T06:12:43,932 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 1194fc3df4ce2c61f3cc6db1a5cd3843, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733897438727.1194fc3df4ce2c61f3cc6db1a5cd3843.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ec1b979a6a9bb8061812d28abf727fcd, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733897438727.ec1b979a6a9bb8061812d28abf727fcd.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T06:12:43,933 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 2024-12-11T06:12:43,933 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897563933"}]},"ts":"9223372036854775807"} 2024-12-11T06:12:43,934 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-11T06:12:43,935 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:43,937 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 46 msec 2024-12-11T06:12:44,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-11T06:12:44,007 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:44,007 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-11T06:12:44,023 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-11T06:12:44,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:44,028 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-11T06:12:44,030 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:44,031 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-11T06:12:44,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:44,063 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=761 (was 711) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:34944 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:50716 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37891 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:56690 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 28036) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_883212016_1 at /127.0.0.1:53698 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:37891 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-1401 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging 
thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/b66397bb9319:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=770 (was 753) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=519 (was 409) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=761 (was 7045) 2024-12-11T06:12:44,064 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=761 is superior to 500 2024-12-11T06:12:44,081 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=761, OpenFileDescriptor=770, MaxFileDescriptor=1048576, SystemLoadAverage=519, ProcessCount=17, AvailableMemoryMB=760 2024-12-11T06:12:44,081 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=761 is superior to 500 2024-12-11T06:12:44,083 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:12:44,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-11T06:12:44,086 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:12:44,086 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-11T06:12:44,087 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:12:44,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-11T06:12:44,099 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741911_1087 (size=442) 2024-12-11T06:12:44,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741911_1087 (size=442) 2024-12-11T06:12:44,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741911_1087 (size=442) 2024-12-11T06:12:44,102 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7d13a1d92f4b47b752ff985be8bc0775, NAME => 'testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:12:44,102 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5de43fee139c35f78f381d985b89b0e2, NAME => 'testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:12:44,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741913_1089 (size=67) 2024-12-11T06:12:44,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741913_1089 (size=67) 2024-12-11T06:12:44,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741913_1089 (size=67) 2024-12-11T06:12:44,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741912_1088 (size=67) 2024-12-11T06:12:44,118 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:12:44,118 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 7d13a1d92f4b47b752ff985be8bc0775, disabling compactions & flushes 2024-12-11T06:12:44,118 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741912_1088 (size=67) 2024-12-11T06:12:44,118 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:12:44,118 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:12:44,118 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. after waiting 0 ms 2024-12-11T06:12:44,118 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:12:44,118 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:12:44,118 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7d13a1d92f4b47b752ff985be8bc0775: Waiting for close lock at 1733897564118Disabling compacts and flushes for region at 1733897564118Disabling writes for close at 1733897564118Writing region close event to WAL at 1733897564118Closed at 1733897564118 2024-12-11T06:12:44,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741912_1088 (size=67) 2024-12-11T06:12:44,119 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:12:44,119 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing 5de43fee139c35f78f381d985b89b0e2, disabling compactions & flushes 2024-12-11T06:12:44,119 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:12:44,119 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:12:44,119 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. after waiting 0 ms 2024-12-11T06:12:44,119 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:12:44,119 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 
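For reference, the table descriptor logged in the create request above (MOB-enabled family 'cf' with MOB_THRESHOLD => '0' and VERSIONS => '1', pre-split into the two regions ['', '1') and ['1', '')) could be built through the HBase client API roughly as sketched below. This is an illustrative sketch, not the test's actual code; the class name and the connection setup are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper, not part of the test source.
    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)   // IS_MOB => 'true'
                  .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell goes to MOB storage
                  .setMaxVersions(1)     // VERSIONS => '1'
                  .build())
              .build();
          // One split key ('1') yields the two regions seen in the log: ['', '1') and ['1', '').
          admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
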
2024-12-11T06:12:44,119 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5de43fee139c35f78f381d985b89b0e2: Waiting for close lock at 1733897564119Disabling compacts and flushes for region at 1733897564119Disabling writes for close at 1733897564119Writing region close event to WAL at 1733897564119Closed at 1733897564119 2024-12-11T06:12:44,121 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:12:44,121 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733897564121"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897564121"}]},"ts":"1733897564121"} 2024-12-11T06:12:44,121 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733897564121"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897564121"}]},"ts":"1733897564121"} 2024-12-11T06:12:44,124 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T06:12:44,125 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:12:44,125 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897564125"}]},"ts":"1733897564125"} 2024-12-11T06:12:44,127 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-11T06:12:44,128 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:12:44,131 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:12:44,131 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:12:44,131 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:12:44,131 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:12:44,131 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:12:44,131 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:12:44,131 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:12:44,131 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:12:44,131 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:12:44,131 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:12:44,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7d13a1d92f4b47b752ff985be8bc0775, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5de43fee139c35f78f381d985b89b0e2, ASSIGN}] 2024-12-11T06:12:44,133 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5de43fee139c35f78f381d985b89b0e2, ASSIGN 2024-12-11T06:12:44,133 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7d13a1d92f4b47b752ff985be8bc0775, ASSIGN 2024-12-11T06:12:44,134 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5de43fee139c35f78f381d985b89b0e2, ASSIGN; state=OFFLINE, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:12:44,134 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7d13a1d92f4b47b752ff985be8bc0775, ASSIGN; state=OFFLINE, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:12:44,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-11T06:12:44,285 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
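Once the assignment subprocedures initialized above complete, the resulting region-to-server placement can be read back from a client with a RegionLocator, roughly as below. This is a sketch under the assumption that a Connection to this mini cluster is already open; the class and method names are made up for illustration.

    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Hypothetical helper: prints which region server hosts each region of the new table.
    public final class RegionPlacementSketch {
      static void printPlacement(Connection conn) throws Exception {
        TableName name = TableName.valueOf("testtb-testExportWithTargetName");
        try (RegionLocator locator = conn.getRegionLocator(name)) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            // e.g. 7d13a1d92f4b47b752ff985be8bc0775 -> b66397bb9319,35939,1733897428441
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
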
2024-12-11T06:12:44,285 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=7d13a1d92f4b47b752ff985be8bc0775, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:12:44,285 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=5de43fee139c35f78f381d985b89b0e2, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:12:44,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7d13a1d92f4b47b752ff985be8bc0775, ASSIGN because future has completed 2024-12-11T06:12:44,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:12:44,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5de43fee139c35f78f381d985b89b0e2, ASSIGN because future has completed 2024-12-11T06:12:44,289 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5de43fee139c35f78f381d985b89b0e2, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:12:44,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-11T06:12:44,444 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:12:44,444 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:12:44,444 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => 7d13a1d92f4b47b752ff985be8bc0775, NAME => 'testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:12:44,444 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => 5de43fee139c35f78f381d985b89b0e2, NAME => 'testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:12:44,445 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. service=AccessControlService 2024-12-11T06:12:44,445 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 
service=AccessControlService 2024-12-11T06:12:44,445 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:12:44,445 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:12:44,445 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,445 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,445 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:12:44,445 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:12:44,445 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,445 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,445 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,445 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,447 INFO [StoreOpener-7d13a1d92f4b47b752ff985be8bc0775-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,447 INFO [StoreOpener-5de43fee139c35f78f381d985b89b0e2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,448 INFO [StoreOpener-7d13a1d92f4b47b752ff985be8bc0775-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7d13a1d92f4b47b752ff985be8bc0775 columnFamilyName cf 2024-12-11T06:12:44,448 INFO [StoreOpener-5de43fee139c35f78f381d985b89b0e2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5de43fee139c35f78f381d985b89b0e2 columnFamilyName cf 2024-12-11T06:12:44,450 DEBUG [StoreOpener-5de43fee139c35f78f381d985b89b0e2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:12:44,450 DEBUG [StoreOpener-7d13a1d92f4b47b752ff985be8bc0775-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:12:44,450 INFO [StoreOpener-5de43fee139c35f78f381d985b89b0e2-1 {}] regionserver.HStore(327): Store=5de43fee139c35f78f381d985b89b0e2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:12:44,450 INFO [StoreOpener-7d13a1d92f4b47b752ff985be8bc0775-1 {}] regionserver.HStore(327): Store=7d13a1d92f4b47b752ff985be8bc0775/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:12:44,451 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,451 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,452 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,452 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,452 DEBUG 
[RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,453 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,453 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,453 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,453 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,453 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,455 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,455 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,458 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:12:44,458 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:12:44,458 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened 7d13a1d92f4b47b752ff985be8bc0775; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65339133, jitterRate=-0.026371046900749207}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:12:44,459 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,459 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened 5de43fee139c35f78f381d985b89b0e2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67796955, jitterRate=0.010253354907035828}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:12:44,459 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,459 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for 7d13a1d92f4b47b752ff985be8bc0775: Running coprocessor pre-open hook at 1733897564445Writing region info on filesystem at 1733897564445Initializing all the Stores at 1733897564446 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897564446Cleaning up temporary data from old regions at 1733897564453 (+7 ms)Running coprocessor post-open hooks at 1733897564459 (+6 ms)Region opened successfully at 1733897564459 2024-12-11T06:12:44,459 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for 5de43fee139c35f78f381d985b89b0e2: Running coprocessor pre-open hook at 1733897564445Writing region info on filesystem at 1733897564446 (+1 ms)Initializing all the Stores at 1733897564446Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897564446Cleaning up temporary data from old regions at 1733897564453 (+7 ms)Running coprocessor post-open hooks at 1733897564459 (+6 ms)Region opened successfully at 1733897564459 2024-12-11T06:12:44,460 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2., pid=49, masterSystemTime=1733897564441 2024-12-11T06:12:44,460 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775., pid=48, masterSystemTime=1733897564440 2024-12-11T06:12:44,462 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:12:44,462 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 
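For reference, the CompactionConfiguration values dumped when these stores opened (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000, jitter 0.5, throttle point 2684354560) match the HBase defaults. The sketch below is only an illustration of which configuration keys those numbers usually come from; the key-to-value mapping is my annotation, not something stated in this log, and the test itself appears to rely on the defaults rather than setting them.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Illustrative sketch: mirrors the CompactionConfiguration values logged above.
    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);           // ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);   // off-peak ratio
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);      // major period (7 days)
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);    // major jitter
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L); // throttle point
        System.out.println("compaction ratio = " + conf.get("hbase.hstore.compaction.ratio"));
      }
    }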
2024-12-11T06:12:44,463 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=7d13a1d92f4b47b752ff985be8bc0775, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:12:44,463 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:12:44,463 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:12:44,464 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=5de43fee139c35f78f381d985b89b0e2, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:12:44,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:12:44,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5de43fee139c35f78f381d985b89b0e2, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:12:44,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=46 2024-12-11T06:12:44,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775, server=b66397bb9319,35939,1733897428441 in 178 msec 2024-12-11T06:12:44,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=47 2024-12-11T06:12:44,470 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure 5de43fee139c35f78f381d985b89b0e2, server=b66397bb9319,42579,1733897428663 in 178 msec 2024-12-11T06:12:44,470 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7d13a1d92f4b47b752ff985be8bc0775, ASSIGN in 336 msec 2024-12-11T06:12:44,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-12-11T06:12:44,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5de43fee139c35f78f381d985b89b0e2, ASSIGN in 338 msec 2024-12-11T06:12:44,472 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:12:44,473 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897564472"}]},"ts":"1733897564472"} 2024-12-11T06:12:44,474 INFO 
[PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-11T06:12:44,475 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:12:44,475 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-11T06:12:44,478 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-11T06:12:44,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:44,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:44,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:44,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:12:44,482 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T06:12:44,482 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T06:12:44,483 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T06:12:44,483 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-11T06:12:44,484 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 399 msec 2024-12-11T06:12:44,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-11T06:12:44,717 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T06:12:44,718 DEBUG [Time-limited 
test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T06:12:44,721 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-11T06:12:44,721 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:12:44,721 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:12:44,723 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T06:12:44,729 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T06:12:44,733 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:12:44,735 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41042, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:12:44,736 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T06:12:44,739 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T06:12:44,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897564739 (current time:1733897564739). 
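The "snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName ... type=FLUSH ttl=0 }" entry above is the master-side record of a client call. A minimal client-side sketch of issuing such a FLUSH-type snapshot follows; the connection setup is an assumption (the test uses its own mini-cluster utilities, which are not shown here).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot, matching "type=FLUSH" in the request logged above.
          admin.snapshot("emptySnaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"),
              SnapshotType.FLUSH);
        }
      }
    }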
2024-12-11T06:12:44,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:12:44,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-11T06:12:44,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:12:44,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d2d6d16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:12:44,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:12:44,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:12:44,741 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:12:44,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:12:44,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:12:44,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@481727d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:12:44,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:12:44,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:12:44,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:44,743 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48410, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:12:44,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9b58b58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:12:44,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:12:44,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:12:44,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:12:44,746 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54370, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:12:44,747 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 2024-12-11T06:12:44,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:12:44,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:44,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:44,747 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
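The call stack above is the snapshot validation path opening a short-lived connection to read the table's ACL (the "jenkins: RWXCA" entry written at table creation) so it can be copied into the snapshot description. A hedged sketch of reading those permissions through the public AccessControlClient API; the connection setup is an assumption and the output format is only expected to be equivalent to the logged entry, not identical.

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class AclReadSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Lists the hbase:acl entries for the table; expected to include an
          // owner entry equivalent to "jenkins: RWXCA" from the log above.
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testtb-testExportWithTargetName");
          perms.forEach(System.out::println);
        }
      }
    }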
2024-12-11T06:12:44,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@465868d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:12:44,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:12:44,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:12:44,749 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:12:44,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:12:44,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:12:44,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e6b523e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:12:44,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:12:44,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:12:44,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:44,750 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48428, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:12:44,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c4ac5e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:12:44,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:12:44,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:12:44,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:12:44,753 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54376, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
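The repeated client parameter dumps above (connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000) are the RPC client socket timeouts at their default values. Mapping them to configuration keys, as in the sketch below, is my inference rather than something the log states, so treat the key names as assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch only: shows where the connectTO/readTO/writeTO values logged above
    // would typically be tuned; this test run is using the defaults.
    public class RpcTimeoutSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000); // connectTO
        conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);    // readTO
        conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);   // writeTO
        System.out.println(conf.getInt("hbase.ipc.client.socket.timeout.read", -1));
      }
    }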
2024-12-11T06:12:44,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:12:44,756 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 2024-12-11T06:12:44,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:12:44,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:44,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:44,757 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:12:44,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-11T06:12:44,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T06:12:44,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T06:12:44,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-11T06:12:44,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-11T06:12:44,760 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:12:44,761 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:12:44,763 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:12:44,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741914_1090 (size=167) 2024-12-11T06:12:44,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741914_1090 (size=167) 2024-12-11T06:12:44,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741914_1090 (size=167) 2024-12-11T06:12:44,773 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:12:44,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de43fee139c35f78f381d985b89b0e2}] 2024-12-11T06:12:44,774 
INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,774 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-11T06:12:44,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-11T06:12:44,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-11T06:12:44,927 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:12:44,927 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:12:44,927 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for 7d13a1d92f4b47b752ff985be8bc0775: 2024-12-11T06:12:44,927 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for 5de43fee139c35f78f381d985b89b0e2: 2024-12-11T06:12:44,927 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. for emptySnaptb0-testExportWithTargetName completed. 2024-12-11T06:12:44,927 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. for emptySnaptb0-testExportWithTargetName completed. 2024-12-11T06:12:44,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-11T06:12:44,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-11T06:12:44,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:12:44,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:12:44,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:12:44,928 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:12:44,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741915_1091 (size=70) 2024-12-11T06:12:44,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741916_1092 (size=70) 2024-12-11T06:12:44,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741915_1091 (size=70) 2024-12-11T06:12:44,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741916_1092 (size=70) 2024-12-11T06:12:44,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:12:44,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741915_1091 (size=70) 2024-12-11T06:12:44,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-11T06:12:44,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741916_1092 (size=70) 2024-12-11T06:12:44,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 
2024-12-11T06:12:44,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-11T06:12:44,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-11T06:12:44,940 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-11T06:12:44,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,940 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:44,941 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:44,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5de43fee139c35f78f381d985b89b0e2 in 169 msec 2024-12-11T06:12:44,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-12-11T06:12:44,945 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775 in 169 msec 2024-12-11T06:12:44,945 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:12:44,946 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:12:44,947 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
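The SNAPSHOT_SNAPSHOT_MOB_REGION step and the MobRegionSnapshotPool work above exist because the 'cf' family was created MOB-enabled (IS_MOB => 'true', MOB_THRESHOLD => '0' in the descriptor logged earlier). A sketch of declaring an equivalent family is below; the builder calls are standard client API, but treating this as the exact code used by the test is an assumption.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static TableDescriptor descriptor() {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell value is stored as a MOB file
            .setMaxVersions(1)     // VERSIONS => '1'
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
            .setColumnFamily(cf)
            .build();
      }
    }

With MOB_THRESHOLD set to 0, even the small test values flushed later go to the mobdir paths seen further down in this log.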
2024-12-11T06:12:44,947 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:12:44,947 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:12:44,948 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T06:12:44,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741917_1093 (size=62) 2024-12-11T06:12:44,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741917_1093 (size=62) 2024-12-11T06:12:44,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741917_1093 (size=62) 2024-12-11T06:12:44,961 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:12:44,961 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-11T06:12:44,962 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-11T06:12:44,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741918_1094 (size=649) 2024-12-11T06:12:44,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741918_1094 (size=649) 2024-12-11T06:12:44,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741918_1094 (size=649) 2024-12-11T06:12:44,976 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:12:44,981 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:12:44,981 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-11T06:12:44,983 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:12:44,983 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-11T06:12:44,984 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 225 msec 2024-12-11T06:12:45,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-11T06:12:45,077 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T06:12:45,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:12:45,087 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:12:45,090 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T06:12:45,093 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-11T06:12:45,093 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 
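The "writing data ... with WAL disabled. Data may be lost in the event of a crash." warnings above indicate the test loads rows with per-mutation durability turned off. A minimal sketch of a put that produces that warning; the row key and value are placeholders, only the cf:q column matches the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
          Put put = new Put(Bytes.toBytes("row-0001"))   // placeholder row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
              .setDurability(Durability.SKIP_WAL);       // triggers the warning logged above
          table.put(put);
        }
      }
    }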
2024-12-11T06:12:45,093 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:12:45,095 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T06:12:45,101 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T06:12:45,108 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-11T06:12:45,111 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T06:12:45,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897565111 (current time:1733897565111). 2024-12-11T06:12:45,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:12:45,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-11T06:12:45,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:12:45,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49ccb4de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:12:45,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:12:45,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:12:45,113 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:12:45,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:12:45,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:12:45,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@289d0d4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-11T06:12:45,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:12:45,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:12:45,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:45,114 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48442, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:12:45,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e36358b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:12:45,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:12:45,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:12:45,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:12:45,117 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54388, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:12:45,118 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
2024-12-11T06:12:45,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:12:45,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:45,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:45,119 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:12:45,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@83aaeb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:12:45,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:12:45,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:12:45,120 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:12:45,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:12:45,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:12:45,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fa15226, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:12:45,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:12:45,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:12:45,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:45,122 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48464, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:12:45,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a67e442, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:12:45,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:12:45,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:12:45,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:12:45,125 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54404, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:12:45,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:12:45,128 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
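The location lookups above (hbase:meta and the hbase:acl row for this table) are performed internally by the async client. The same information can be fetched explicitly through a RegionLocator, as in this hedged sketch; the connection setup is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AclLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:acl"))) {
          // Same lookup the AsyncNonMetaRegionLocator performs above, done explicitly.
          HRegionLocation loc =
              locator.getRegionLocation(Bytes.toBytes("testtb-testExportWithTargetName"));
          System.out.println(loc.getRegion().getRegionNameAsString()
              + " on " + loc.getServerName());
        }
      }
    }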
2024-12-11T06:12:45,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:12:45,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:45,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:12:45,128 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:12:45,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-11T06:12:45,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
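The recurring "Checking to see if procedure is done pid=..." entries are the blocking Admin.snapshot call polling the master until the snapshot procedure finishes. An asynchronous variant that makes the polling explicit is sketched below for the second snapshot (snaptb0-testExportWithTargetName); the waiting strategy actually used by the test is not visible in this log, so this is an illustration only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotPollSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          SnapshotDescription desc = new SnapshotDescription(
              "snaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"),
              SnapshotType.FLUSH);
          admin.snapshotAsync(desc);                  // submit the request without blocking
          while (!admin.isSnapshotFinished(desc)) {   // equivalent of the polling logged above
            Thread.sleep(200);
          }
        }
      }
    }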
2024-12-11T06:12:45,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-11T06:12:45,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-11T06:12:45,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-11T06:12:45,132 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:12:45,133 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:12:45,135 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:12:45,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741919_1095 (size=162) 2024-12-11T06:12:45,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741919_1095 (size=162) 2024-12-11T06:12:45,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741919_1095 (size=162) 2024-12-11T06:12:45,145 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:12:45,145 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de43fee139c35f78f381d985b89b0e2}] 2024-12-11T06:12:45,146 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:45,146 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:45,237 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-11T06:12:45,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-11T06:12:45,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-11T06:12:45,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:12:45,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:12:45,299 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing 7d13a1d92f4b47b752ff985be8bc0775 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-11T06:12:45,299 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing 5de43fee139c35f78f381d985b89b0e2 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-11T06:12:45,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119e2b9301b06a4f9c836f545f47beab41_7d13a1d92f4b47b752ff985be8bc0775 is 71, key is 003b96b1fa990920648cbe8ddbcf33c8/cf:q/1733897565084/Put/seqid=0 2024-12-11T06:12:45,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741920_1096 (size=5241) 2024-12-11T06:12:45,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741920_1096 (size=5241) 2024-12-11T06:12:45,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211055c96f4260a40c2aac7c0a804c77c13_5de43fee139c35f78f381d985b89b0e2 is 71, key is 111b8b5c015df41c066a58ec891f2ade/cf:q/1733897565087/Put/seqid=0 2024-12-11T06:12:45,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741920_1096 (size=5241) 2024-12-11T06:12:45,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:12:45,336 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH 
Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119e2b9301b06a4f9c836f545f47beab41_7d13a1d92f4b47b752ff985be8bc0775 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412119e2b9301b06a4f9c836f545f47beab41_7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:45,337 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/.tmp/cf/8e3bccba89914e129f67edc156d72b85, store: [table=testtb-testExportWithTargetName family=cf region=7d13a1d92f4b47b752ff985be8bc0775] 2024-12-11T06:12:45,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/.tmp/cf/8e3bccba89914e129f67edc156d72b85 is 208, key is 04d397e517e82c82ae42e2015bbd6df74/cf:q/1733897565084/Put/seqid=0 2024-12-11T06:12:45,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741921_1097 (size=8031) 2024-12-11T06:12:45,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741921_1097 (size=8031) 2024-12-11T06:12:45,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741921_1097 (size=8031) 2024-12-11T06:12:45,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:12:45,348 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211055c96f4260a40c2aac7c0a804c77c13_5de43fee139c35f78f381d985b89b0e2 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241211055c96f4260a40c2aac7c0a804c77c13_5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:45,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/.tmp/cf/da8fa6b187b64266a4d97e4c8655ef77, store: [table=testtb-testExportWithTargetName family=cf region=5de43fee139c35f78f381d985b89b0e2] 2024-12-11T06:12:45,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/.tmp/cf/da8fa6b187b64266a4d97e4c8655ef77 is 208, key is 1c5296fe610c052e6f2e377bba1790ca0/cf:q/1733897565087/Put/seqid=0 2024-12-11T06:12:45,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741922_1098 (size=6320) 2024-12-11T06:12:45,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741922_1098 (size=6320) 2024-12-11T06:12:45,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741922_1098 (size=6320) 2024-12-11T06:12:45,358 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/.tmp/cf/8e3bccba89914e129f67edc156d72b85 2024-12-11T06:12:45,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/.tmp/cf/8e3bccba89914e129f67edc156d72b85 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/cf/8e3bccba89914e129f67edc156d72b85 2024-12-11T06:12:45,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741923_1099 (size=14541) 2024-12-11T06:12:45,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741923_1099 (size=14541) 2024-12-11T06:12:45,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741923_1099 (size=14541) 2024-12-11T06:12:45,367 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/.tmp/cf/da8fa6b187b64266a4d97e4c8655ef77 2024-12-11T06:12:45,373 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/cf/8e3bccba89914e129f67edc156d72b85, entries=5, sequenceid=6, filesize=6.2 K 2024-12-11T06:12:45,374 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 7d13a1d92f4b47b752ff985be8bc0775 in 76ms, 
sequenceid=6, compaction requested=false 2024-12-11T06:12:45,374 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-11T06:12:45,374 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/.tmp/cf/da8fa6b187b64266a4d97e4c8655ef77 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/cf/da8fa6b187b64266a4d97e4c8655ef77 2024-12-11T06:12:45,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for 7d13a1d92f4b47b752ff985be8bc0775: 2024-12-11T06:12:45,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. for snaptb0-testExportWithTargetName completed. 2024-12-11T06:12:45,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-11T06:12:45,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:12:45,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/cf/8e3bccba89914e129f67edc156d72b85] hfiles 2024-12-11T06:12:45,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/cf/8e3bccba89914e129f67edc156d72b85 for snapshot=snaptb0-testExportWithTargetName 2024-12-11T06:12:45,381 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/cf/da8fa6b187b64266a4d97e4c8655ef77, entries=45, sequenceid=6, filesize=14.2 K 2024-12-11T06:12:45,382 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 5de43fee139c35f78f381d985b89b0e2 in 82ms, sequenceid=6, compaction requested=false 2024-12-11T06:12:45,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for 5de43fee139c35f78f381d985b89b0e2: 2024-12-11T06:12:45,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. for snaptb0-testExportWithTargetName completed. 2024-12-11T06:12:45,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-11T06:12:45,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:12:45,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/cf/da8fa6b187b64266a4d97e4c8655ef77] hfiles 2024-12-11T06:12:45,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/cf/da8fa6b187b64266a4d97e4c8655ef77 for snapshot=snaptb0-testExportWithTargetName 2024-12-11T06:12:45,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741924_1100 (size=109) 2024-12-11T06:12:45,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741924_1100 (size=109) 2024-12-11T06:12:45,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741924_1100 (size=109) 2024-12-11T06:12:45,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 
2024-12-11T06:12:45,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-11T06:12:45,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-11T06:12:45,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:45,390 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:45,392 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775 in 246 msec 2024-12-11T06:12:45,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741925_1101 (size=109) 2024-12-11T06:12:45,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741925_1101 (size=109) 2024-12-11T06:12:45,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741925_1101 (size=109) 2024-12-11T06:12:45,398 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 
2024-12-11T06:12:45,398 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-11T06:12:45,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-11T06:12:45,398 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:45,399 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:45,401 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-12-11T06:12:45,401 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5de43fee139c35f78f381d985b89b0e2 in 254 msec 2024-12-11T06:12:45,402 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:12:45,403 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:12:45,404 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:12:45,404 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:12:45,404 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:12:45,405 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241211055c96f4260a40c2aac7c0a804c77c13_5de43fee139c35f78f381d985b89b0e2, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412119e2b9301b06a4f9c836f545f47beab41_7d13a1d92f4b47b752ff985be8bc0775] hfiles 2024-12-11T06:12:45,405 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241211055c96f4260a40c2aac7c0a804c77c13_5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:12:45,405 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412119e2b9301b06a4f9c836f545f47beab41_7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:12:45,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741926_1102 (size=293) 2024-12-11T06:12:45,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741926_1102 (size=293) 2024-12-11T06:12:45,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741926_1102 (size=293) 2024-12-11T06:12:45,415 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:12:45,415 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-11T06:12:45,416 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-11T06:12:45,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741927_1103 (size=959) 2024-12-11T06:12:45,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741927_1103 (size=959) 2024-12-11T06:12:45,430 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741927_1103 (size=959) 2024-12-11T06:12:45,435 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:12:45,443 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:12:45,444 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-11T06:12:45,445 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:12:45,445 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-11T06:12:45,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-11T06:12:45,447 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 316 msec 2024-12-11T06:12:45,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-11T06:12:45,757 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T06:12:45,757 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897565757 2024-12-11T06:12:45,758 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:42173, tgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897565757, rawTgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897565757, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:12:45,789 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, 
inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:12:45,789 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897565757, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897565757/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-11T06:12:45,791 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T06:12:45,796 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897565757/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-11T06:12:45,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741928_1104 (size=162) 2024-12-11T06:12:45,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741928_1104 (size=162) 2024-12-11T06:12:45,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741928_1104 (size=162) 2024-12-11T06:12:45,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741929_1105 (size=959) 2024-12-11T06:12:45,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741929_1105 (size=959) 2024-12-11T06:12:45,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741929_1105 (size=959) 2024-12-11T06:12:45,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741930_1106 (size=154) 2024-12-11T06:12:45,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741930_1106 (size=154) 2024-12-11T06:12:45,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741930_1106 (size=154) 2024-12-11T06:12:45,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:12:45,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:12:45,824 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:12:46,878 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-18252992321552782270.jar 2024-12-11T06:12:46,878 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:12:46,879 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:12:46,966 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-4482908743037919677.jar 2024-12-11T06:12:46,967 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:12:46,967 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:12:46,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:12:46,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:12:46,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:12:46,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:12:46,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T06:12:46,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T06:12:46,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T06:12:46,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T06:12:46,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T06:12:46,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T06:12:46,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T06:12:46,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T06:12:46,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T06:12:46,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T06:12:46,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T06:12:46,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:12:46,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:12:46,972 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:12:46,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:12:46,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:12:46,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:12:46,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:12:47,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741931_1107 (size=131440) 2024-12-11T06:12:47,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741931_1107 (size=131440) 2024-12-11T06:12:47,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741931_1107 (size=131440) 2024-12-11T06:12:47,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741932_1108 (size=4188619) 2024-12-11T06:12:47,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741932_1108 (size=4188619) 2024-12-11T06:12:47,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741932_1108 (size=4188619) 2024-12-11T06:12:47,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741933_1109 (size=1323991) 2024-12-11T06:12:47,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741933_1109 (size=1323991) 2024-12-11T06:12:47,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741933_1109 (size=1323991) 2024-12-11T06:12:47,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741934_1110 (size=903542) 2024-12-11T06:12:47,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741934_1110 (size=903542) 2024-12-11T06:12:47,100 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741934_1110 (size=903542) 2024-12-11T06:12:47,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741935_1111 (size=8360360) 2024-12-11T06:12:47,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741935_1111 (size=8360360) 2024-12-11T06:12:47,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741935_1111 (size=8360360) 2024-12-11T06:12:47,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741936_1112 (size=1877034) 2024-12-11T06:12:47,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741936_1112 (size=1877034) 2024-12-11T06:12:47,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741936_1112 (size=1877034) 2024-12-11T06:12:47,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741937_1113 (size=77835) 2024-12-11T06:12:47,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741937_1113 (size=77835) 2024-12-11T06:12:47,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741937_1113 (size=77835) 2024-12-11T06:12:47,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741938_1114 (size=30949) 2024-12-11T06:12:47,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741938_1114 (size=30949) 2024-12-11T06:12:47,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741938_1114 (size=30949) 2024-12-11T06:12:47,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741939_1115 (size=1597213) 2024-12-11T06:12:47,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741939_1115 (size=1597213) 2024-12-11T06:12:47,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741939_1115 (size=1597213) 2024-12-11T06:12:47,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741940_1116 (size=4695811) 2024-12-11T06:12:47,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741940_1116 (size=4695811) 2024-12-11T06:12:47,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741940_1116 (size=4695811) 2024-12-11T06:12:47,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741941_1117 (size=232957) 2024-12-11T06:12:47,213 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741941_1117 (size=232957) 2024-12-11T06:12:47,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741941_1117 (size=232957) 2024-12-11T06:12:47,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741942_1118 (size=127628) 2024-12-11T06:12:47,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741942_1118 (size=127628) 2024-12-11T06:12:47,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741942_1118 (size=127628) 2024-12-11T06:12:47,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741943_1119 (size=20406) 2024-12-11T06:12:47,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741943_1119 (size=20406) 2024-12-11T06:12:47,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741943_1119 (size=20406) 2024-12-11T06:12:47,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741944_1120 (size=5175431) 2024-12-11T06:12:47,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741944_1120 (size=5175431) 2024-12-11T06:12:47,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741944_1120 (size=5175431) 2024-12-11T06:12:47,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741945_1121 (size=217634) 2024-12-11T06:12:47,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741945_1121 (size=217634) 2024-12-11T06:12:47,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741945_1121 (size=217634) 2024-12-11T06:12:47,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741946_1122 (size=1832290) 2024-12-11T06:12:47,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741946_1122 (size=1832290) 2024-12-11T06:12:47,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741946_1122 (size=1832290) 2024-12-11T06:12:47,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741947_1123 (size=322274) 2024-12-11T06:12:47,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741947_1123 (size=322274) 2024-12-11T06:12:47,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741947_1123 (size=322274) 2024-12-11T06:12:47,315 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741948_1124 (size=503880) 2024-12-11T06:12:47,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741948_1124 (size=503880) 2024-12-11T06:12:47,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741948_1124 (size=503880) 2024-12-11T06:12:47,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741949_1125 (size=443172) 2024-12-11T06:12:47,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741949_1125 (size=443172) 2024-12-11T06:12:47,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741949_1125 (size=443172) 2024-12-11T06:12:47,336 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0001_000001 (auth:SIMPLE) from 127.0.0.1:46080 2024-12-11T06:12:47,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741950_1126 (size=29229) 2024-12-11T06:12:47,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741950_1126 (size=29229) 2024-12-11T06:12:47,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741950_1126 (size=29229) 2024-12-11T06:12:47,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0001/container_1733897436125_0001_01_000001/launch_container.sh] 2024-12-11T06:12:47,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0001/container_1733897436125_0001_01_000001/container_tokens] 2024-12-11T06:12:47,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0001/container_1733897436125_0001_01_000001/sysfs] 2024-12-11T06:12:47,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741951_1127 (size=24096) 2024-12-11T06:12:47,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741951_1127 (size=24096) 2024-12-11T06:12:47,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741951_1127 (size=24096) 
2024-12-11T06:12:47,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741952_1128 (size=6426220) 2024-12-11T06:12:47,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741952_1128 (size=6426220) 2024-12-11T06:12:47,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741952_1128 (size=6426220) 2024-12-11T06:12:47,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741953_1129 (size=111872) 2024-12-11T06:12:47,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741953_1129 (size=111872) 2024-12-11T06:12:47,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741953_1129 (size=111872) 2024-12-11T06:12:47,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741954_1130 (size=45609) 2024-12-11T06:12:47,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741954_1130 (size=45609) 2024-12-11T06:12:47,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741954_1130 (size=45609) 2024-12-11T06:12:47,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741955_1131 (size=136454) 2024-12-11T06:12:47,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741955_1131 (size=136454) 2024-12-11T06:12:47,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741955_1131 (size=136454) 2024-12-11T06:12:47,420 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-11T06:12:47,423 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-11T06:12:47,425 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.2 K 2024-12-11T06:12:47,425 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-11T06:12:47,425 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.2 K 2024-12-11T06:12:47,425 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-11T06:12:47,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741956_1132 (size=1031) 2024-12-11T06:12:47,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741956_1132 (size=1031) 2024-12-11T06:12:47,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741956_1132 (size=1031) 2024-12-11T06:12:47,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741957_1133 (size=35) 2024-12-11T06:12:47,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741957_1133 (size=35) 2024-12-11T06:12:47,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741957_1133 (size=35) 2024-12-11T06:12:47,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741958_1134 (size=304001) 2024-12-11T06:12:47,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741958_1134 (size=304001) 2024-12-11T06:12:47,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741958_1134 (size=304001) 2024-12-11T06:12:47,502 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:12:47,502 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T06:12:48,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-11T06:12:48,055 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-11T06:12:48,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:48,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-11T06:12:48,091 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0002_000001 (auth:SIMPLE) from 127.0.0.1:50354 2024-12-11T06:12:48,832 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:12:53,949 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0002_000001 (auth:SIMPLE) from 127.0.0.1:46448 2024-12-11T06:12:54,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741959_1135 (size=349699) 2024-12-11T06:12:54,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741959_1135 (size=349699) 2024-12-11T06:12:54,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741959_1135 (size=349699) 2024-12-11T06:12:56,257 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0002_000001 (auth:SIMPLE) from 127.0.0.1:52642 2024-12-11T06:12:56,259 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0002_000001 (auth:SIMPLE) from 127.0.0.1:49494 2024-12-11T06:12:56,288 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
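The export split sizes logged earlier (14.2 K, 7.8 K, 6.2 K, 5.1 K) come from ExportSnapshot grouping the snapshot's hfiles into roughly balanced chunks, one per map task. A hedged sketch of driving the exporter programmatically with a renamed target, as this test does via its own mini-cluster paths; the destination URI is hypothetical and the option spellings should be checked against your HBase version's ExportSnapshot help:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportWithTargetNameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // --snapshot: existing snapshot to copy; --target: name to export it under;
        // --copy-to: destination root filesystem (hypothetical URI here).
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb0-testExportWithTargetName",
            "--target", "testExportWithTargetName",
            "--copy-to", "hdfs://dest-cluster:8020/hbase"
        });
        System.exit(rc);
      }
    }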
2024-12-11T06:12:57,065 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0002_000001 (auth:SIMPLE) from 127.0.0.1:52654 2024-12-11T06:12:57,083 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0002_000001 (auth:SIMPLE) from 127.0.0.1:49502 2024-12-11T06:12:59,335 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0002_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T06:13:02,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741960_1136 (size=14541) 2024-12-11T06:13:02,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741960_1136 (size=14541) 2024-12-11T06:13:02,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741960_1136 (size=14541) 2024-12-11T06:13:02,712 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000002/launch_container.sh] 2024-12-11T06:13:02,712 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000002/container_tokens] 2024-12-11T06:13:02,712 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000002/sysfs] 2024-12-11T06:13:04,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741962_1138 (size=6320) 2024-12-11T06:13:04,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741962_1138 (size=6320) 2024-12-11T06:13:04,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741962_1138 (size=6320) 2024-12-11T06:13:05,047 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000004/launch_container.sh] 2024-12-11T06:13:05,047 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000004/container_tokens] 2024-12-11T06:13:05,048 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000004/sysfs] 2024-12-11T06:13:05,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741963_1139 (size=5241) 2024-12-11T06:13:05,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741963_1139 (size=5241) 2024-12-11T06:13:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741963_1139 (size=5241) 2024-12-11T06:13:05,402 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000005/launch_container.sh] 2024-12-11T06:13:05,402 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000005/container_tokens] 2024-12-11T06:13:05,402 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000005/sysfs] 2024-12-11T06:13:05,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741964_1140 (size=8031) 2024-12-11T06:13:05,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741964_1140 (size=8031) 2024-12-11T06:13:05,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741964_1140 (size=8031) 2024-12-11T06:13:05,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741961_1137 (size=31744) 2024-12-11T06:13:05,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741961_1137 (size=31744) 2024-12-11T06:13:05,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741961_1137 (size=31744) 
2024-12-11T06:13:05,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741965_1141 (size=465) 2024-12-11T06:13:05,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741965_1141 (size=465) 2024-12-11T06:13:05,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741965_1141 (size=465) 2024-12-11T06:13:05,946 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000003/launch_container.sh] 2024-12-11T06:13:05,947 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000003/container_tokens] 2024-12-11T06:13:05,947 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000003/sysfs] 2024-12-11T06:13:05,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741966_1142 (size=31744) 2024-12-11T06:13:05,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741966_1142 (size=31744) 2024-12-11T06:13:05,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741966_1142 (size=31744) 2024-12-11T06:13:06,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741967_1143 (size=349699) 2024-12-11T06:13:06,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741967_1143 (size=349699) 2024-12-11T06:13:06,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741967_1143 (size=349699) 2024-12-11T06:13:06,038 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0002_000001 (auth:SIMPLE) from 127.0.0.1:49376 2024-12-11T06:13:06,048 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0002_000001 (auth:SIMPLE) from 127.0.0.1:49378 2024-12-11T06:13:07,725 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T06:13:07,728 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
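The verification step that follows lists the exported snapshot directory and checks for its .snapshotinfo and data.manifest files. A minimal sketch of that kind of check, assuming a hypothetical export destination rather than the test's mini-cluster paths:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class VerifyExportedSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical export destination under the target cluster's .hbase-snapshot dir.
        Path snapshotDir =
            new Path("hdfs://dest-cluster:8020/hbase/.hbase-snapshot/testExportWithTargetName");
        FileSystem fs = snapshotDir.getFileSystem(conf);
        // Both files must be present for the exported snapshot to be usable.
        for (String name : new String[] { ".snapshotinfo", "data.manifest" }) {
          if (!fs.exists(new Path(snapshotDir, name))) {
            throw new IllegalStateException("Missing " + name + " under " + snapshotDir);
          }
        }
        for (FileStatus st : fs.listStatus(snapshotDir)) {
          System.out.println(st.getPath());
        }
      }
    }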
2024-12-11T06:13:07,774 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-11T06:13:07,774 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T06:13:07,774 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T06:13:07,775 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-11T06:13:07,775 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-11T06:13:07,775 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-11T06:13:07,775 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897565757/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897565757/.hbase-snapshot/testExportWithTargetName 2024-12-11T06:13:07,776 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897565757/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-11T06:13:07,776 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897565757/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-11T06:13:07,791 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-11T06:13:07,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-11T06:13:07,802 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897587802"}]},"ts":"1733897587802"} 2024-12-11T06:13:07,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-11T06:13:07,806 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-11T06:13:07,806 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-11T06:13:07,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-11T06:13:07,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7d13a1d92f4b47b752ff985be8bc0775, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5de43fee139c35f78f381d985b89b0e2, UNASSIGN}] 2024-12-11T06:13:07,811 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5de43fee139c35f78f381d985b89b0e2, UNASSIGN 2024-12-11T06:13:07,812 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7d13a1d92f4b47b752ff985be8bc0775, UNASSIGN 2024-12-11T06:13:07,813 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=7d13a1d92f4b47b752ff985be8bc0775, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:13:07,813 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=5de43fee139c35f78f381d985b89b0e2, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:13:07,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7d13a1d92f4b47b752ff985be8bc0775, UNASSIGN because future has completed 2024-12-11T06:13:07,818 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:13:07,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:13:07,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5de43fee139c35f78f381d985b89b0e2, UNASSIGN because future has completed 2024-12-11T06:13:07,820 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:13:07,820 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5de43fee139c35f78f381d985b89b0e2, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:13:07,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-11T06:13:07,973 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] 
handler.UnassignRegionHandler(122): Close 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:13:07,973 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:13:07,973 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:13:07,973 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:13:07,973 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing 7d13a1d92f4b47b752ff985be8bc0775, disabling compactions & flushes 2024-12-11T06:13:07,973 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing 5de43fee139c35f78f381d985b89b0e2, disabling compactions & flushes 2024-12-11T06:13:07,973 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:13:07,973 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:13:07,974 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:13:07,974 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:13:07,974 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. after waiting 0 ms 2024-12-11T06:13:07,974 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 2024-12-11T06:13:07,974 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. after waiting 0 ms 2024-12-11T06:13:07,974 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 
2024-12-11T06:13:08,000 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:13:08,000 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:13:08,001 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:13:08,001 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2. 2024-12-11T06:13:08,002 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for 5de43fee139c35f78f381d985b89b0e2: Waiting for close lock at 1733897587973Running coprocessor pre-close hooks at 1733897587973Disabling compacts and flushes for region at 1733897587973Disabling writes for close at 1733897587974 (+1 ms)Writing region close event to WAL at 1733897587988 (+14 ms)Running coprocessor post-close hooks at 1733897588001 (+13 ms)Closed at 1733897588001 2024-12-11T06:13:08,002 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:13:08,002 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775. 
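The DisableTableProcedure traced above is what a client-side disable call drives: the master marks the table DISABLING, unassigns each region via close subprocedures, then marks it DISABLED. A sketch of the corresponding client call, assuming an already-reachable cluster configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithTargetName");
          // Blocks until the DisableTableProcedure (and its region-close subprocedures) completes.
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);
          }
        }
      }
    }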
2024-12-11T06:13:08,002 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for 7d13a1d92f4b47b752ff985be8bc0775: Waiting for close lock at 1733897587973Running coprocessor pre-close hooks at 1733897587973Disabling compacts and flushes for region at 1733897587973Disabling writes for close at 1733897587974 (+1 ms)Writing region close event to WAL at 1733897587987 (+13 ms)Running coprocessor post-close hooks at 1733897588002 (+15 ms)Closed at 1733897588002 2024-12-11T06:13:08,004 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed 5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:13:08,005 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=5de43fee139c35f78f381d985b89b0e2, regionState=CLOSED 2024-12-11T06:13:08,005 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed 7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:13:08,006 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=7d13a1d92f4b47b752ff985be8bc0775, regionState=CLOSED 2024-12-11T06:13:08,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5de43fee139c35f78f381d985b89b0e2, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:13:08,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:13:08,016 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-12-11T06:13:08,016 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure 5de43fee139c35f78f381d985b89b0e2, server=b66397bb9319,42579,1733897428663 in 193 msec 2024-12-11T06:13:08,019 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-12-11T06:13:08,019 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure 7d13a1d92f4b47b752ff985be8bc0775, server=b66397bb9319,35939,1733897428441 in 197 msec 2024-12-11T06:13:08,020 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=5de43fee139c35f78f381d985b89b0e2, UNASSIGN in 206 msec 2024-12-11T06:13:08,022 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=58, resume processing ppid=57 2024-12-11T06:13:08,022 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7d13a1d92f4b47b752ff985be8bc0775, UNASSIGN in 209 msec 2024-12-11T06:13:08,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-11T06:13:08,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 216 msec 2024-12-11T06:13:08,028 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897588027"}]},"ts":"1733897588027"} 2024-12-11T06:13:08,031 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-11T06:13:08,031 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-11T06:13:08,035 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 240 msec 2024-12-11T06:13:08,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-11T06:13:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-11T06:13:08,127 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T06:13:08,128 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-11T06:13:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T06:13:08,132 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T06:13:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-11T06:13:08,133 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T06:13:08,142 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-11T06:13:08,142 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:13:08,142 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:13:08,145 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/cf, 
FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/recovered.edits] 2024-12-11T06:13:08,145 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/recovered.edits] 2024-12-11T06:13:08,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T06:13:08,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T06:13:08,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T06:13:08,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-11T06:13:08,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-11T06:13:08,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T06:13:08,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-11T06:13:08,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-11T06:13:08,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T06:13:08,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:08,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T06:13:08,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-12-11T06:13:08,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T06:13:08,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-11T06:13:08,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:08,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:08,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-11T06:13:08,153 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/cf/da8fa6b187b64266a4d97e4c8655ef77 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/cf/da8fa6b187b64266a4d97e4c8655ef77 2024-12-11T06:13:08,153 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/cf/8e3bccba89914e129f67edc156d72b85 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/cf/8e3bccba89914e129f67edc156d72b85 2024-12-11T06:13:08,158 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2/recovered.edits/9.seqid 2024-12-11T06:13:08,158 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:13:08,159 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775/recovered.edits/9.seqid 2024-12-11T06:13:08,159 DEBUG 
[HFileArchiver-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithTargetName/7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:13:08,160 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-11T06:13:08,160 DEBUG [PEWorker-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-11T06:13:08,161 DEBUG [PEWorker-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-12-11T06:13:08,166 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241211055c96f4260a40c2aac7c0a804c77c13_5de43fee139c35f78f381d985b89b0e2 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241211055c96f4260a40c2aac7c0a804c77c13_5de43fee139c35f78f381d985b89b0e2 2024-12-11T06:13:08,166 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412119e2b9301b06a4f9c836f545f47beab41_7d13a1d92f4b47b752ff985be8bc0775 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412119e2b9301b06a4f9c836f545f47beab41_7d13a1d92f4b47b752ff985be8bc0775 2024-12-11T06:13:08,167 DEBUG [PEWorker-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-11T06:13:08,170 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T06:13:08,173 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-11T06:13:08,177 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-11T06:13:08,178 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T06:13:08,178 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithTargetName' from region states. 
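The cleanup that follows (archiving region files, removing the table from hbase:meta, then deleting both snapshots) corresponds to a short Admin sequence on the client side. A sketch under the same assumption of an already-configured connection; the table must already be disabled, as above:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropExportedTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithTargetName");
          // DeleteTableProcedure archives the region and MOB files before dropping META rows.
          admin.deleteTable(table);
          // Remove both snapshots taken during the test.
          admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
          admin.deleteSnapshot("snaptb0-testExportWithTargetName");
        }
      }
    }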
2024-12-11T06:13:08,178 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897588178"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:08,178 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897588178"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:08,181 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T06:13:08,181 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7d13a1d92f4b47b752ff985be8bc0775, NAME => 'testtb-testExportWithTargetName,,1733897564083.7d13a1d92f4b47b752ff985be8bc0775.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5de43fee139c35f78f381d985b89b0e2, NAME => 'testtb-testExportWithTargetName,1,1733897564083.5de43fee139c35f78f381d985b89b0e2.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T06:13:08,181 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithTargetName' as deleted. 2024-12-11T06:13:08,181 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897588181"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:08,183 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-11T06:13:08,184 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-11T06:13:08,185 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 56 msec 2024-12-11T06:13:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-11T06:13:08,257 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-11T06:13:08,257 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-11T06:13:08,266 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-11T06:13:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-11T06:13:08,271 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-11T06:13:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-11T06:13:08,310 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=789 (was 761) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45169 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_894438821_1 at /127.0.0.1:54784 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:54820 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43345 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: Thread-2086 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:49430 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:35497 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35497 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 30874) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:45169 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_894438821_1 at /127.0.0.1:39046 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45169 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:45169 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44003 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=785 (was 770) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=683 (was 519) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=2213 (was 760) - AvailableMemoryMB LEAK? - 2024-12-11T06:13:08,310 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=789 is superior to 500 2024-12-11T06:13:08,331 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=789, OpenFileDescriptor=785, MaxFileDescriptor=1048576, SystemLoadAverage=683, ProcessCount=20, AvailableMemoryMB=2212 2024-12-11T06:13:08,332 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=789 is superior to 500 2024-12-11T06:13:08,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:13:08,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T06:13:08,337 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:13:08,338 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-11T06:13:08,340 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:13:08,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 
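The ResourceChecker report above flags the jump from 770 to 785 open file descriptors and warns twice that Thread=789 is superior to 500, listing each suspect thread under "Potentially hanging thread" together with its stack. For anyone who wants to produce a comparable dump while chasing such a leak outside the test harness, the sketch below uses only the standard java.lang.management API; it is an illustrative approximation of that kind of report, not the ResourceChecker implementation the test itself uses.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class ThreadLeakReport {
  public static void main(String[] args) {
    ThreadMXBean threads = ManagementFactory.getThreadMXBean();
    // Overall live-thread count, the number the resource checker compares against its limit of 500.
    System.out.println("Live threads: " + threads.getThreadCount());
    // Dump every live thread with its stack, similar in spirit to the
    // "Potentially hanging thread" entries printed above.
    for (ThreadInfo info : threads.dumpAllThreads(false, false)) {
      System.out.println("Thread: " + info.getThreadName()
          + " (state=" + info.getThreadState() + ")");
      for (StackTraceElement frame : info.getStackTrace()) {
        System.out.println("    " + frame);
      }
    }
  }
}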
2024-12-11T06:13:08,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741968_1144 (size=440) 2024-12-11T06:13:08,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741968_1144 (size=440) 2024-12-11T06:13:08,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741968_1144 (size=440) 2024-12-11T06:13:08,378 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 35698d8c3901cf8ffdf9dd340712898c, NAME => 'testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:08,381 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 57e715a65e88185b1975130a77dde34b, NAME => 'testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:08,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741969_1145 (size=65) 2024-12-11T06:13:08,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741969_1145 (size=65) 2024-12-11T06:13:08,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741969_1145 (size=65) 2024-12-11T06:13:08,411 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:08,411 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 35698d8c3901cf8ffdf9dd340712898c, disabling compactions & flushes 2024-12-11T06:13:08,411 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:08,411 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:08,411 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. after waiting 0 ms 2024-12-11T06:13:08,412 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:08,412 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:08,412 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 35698d8c3901cf8ffdf9dd340712898c: Waiting for close lock at 1733897588411Disabling compacts and flushes for region at 1733897588411Disabling writes for close at 1733897588412 (+1 ms)Writing region close event to WAL at 1733897588412Closed at 1733897588412 2024-12-11T06:13:08,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741970_1146 (size=65) 2024-12-11T06:13:08,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741970_1146 (size=65) 2024-12-11T06:13:08,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741970_1146 (size=65) 2024-12-11T06:13:08,421 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:08,421 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 57e715a65e88185b1975130a77dde34b, disabling compactions & flushes 2024-12-11T06:13:08,422 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 2024-12-11T06:13:08,422 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 2024-12-11T06:13:08,422 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. after waiting 0 ms 2024-12-11T06:13:08,422 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 2024-12-11T06:13:08,422 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 
2024-12-11T06:13:08,422 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 57e715a65e88185b1975130a77dde34b: Waiting for close lock at 1733897588421Disabling compacts and flushes for region at 1733897588421Disabling writes for close at 1733897588422 (+1 ms)Writing region close event to WAL at 1733897588422Closed at 1733897588422 2024-12-11T06:13:08,424 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:13:08,424 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733897588424"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897588424"}]},"ts":"1733897588424"} 2024-12-11T06:13:08,424 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733897588424"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897588424"}]},"ts":"1733897588424"} 2024-12-11T06:13:08,428 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T06:13:08,431 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:13:08,431 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897588431"}]},"ts":"1733897588431"} 2024-12-11T06:13:08,435 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-11T06:13:08,435 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:13:08,437 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:13:08,437 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:13:08,437 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:13:08,438 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:13:08,438 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:13:08,438 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:13:08,438 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:13:08,438 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:13:08,438 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:13:08,438 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:13:08,438 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=35698d8c3901cf8ffdf9dd340712898c, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=57e715a65e88185b1975130a77dde34b, ASSIGN}] 2024-12-11T06:13:08,440 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=35698d8c3901cf8ffdf9dd340712898c, ASSIGN 2024-12-11T06:13:08,441 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=57e715a65e88185b1975130a77dde34b, ASSIGN 2024-12-11T06:13:08,444 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=35698d8c3901cf8ffdf9dd340712898c, ASSIGN; state=OFFLINE, location=b66397bb9319,44539,1733897428597; forceNewPlan=false, retain=false 2024-12-11T06:13:08,445 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=57e715a65e88185b1975130a77dde34b, ASSIGN; state=OFFLINE, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:13:08,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T06:13:08,595 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T06:13:08,595 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=35698d8c3901cf8ffdf9dd340712898c, regionState=OPENING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:13:08,595 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=57e715a65e88185b1975130a77dde34b, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:13:08,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=57e715a65e88185b1975130a77dde34b, ASSIGN because future has completed 2024-12-11T06:13:08,598 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 57e715a65e88185b1975130a77dde34b, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:13:08,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=35698d8c3901cf8ffdf9dd340712898c, ASSIGN because future has completed 2024-12-11T06:13:08,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 35698d8c3901cf8ffdf9dd340712898c, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:13:08,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T06:13:08,754 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 2024-12-11T06:13:08,755 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => 57e715a65e88185b1975130a77dde34b, NAME => 'testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:13:08,755 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. service=AccessControlService 2024-12-11T06:13:08,756 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:13:08,756 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:08,756 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:08,756 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:08,756 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:08,758 INFO [StoreOpener-57e715a65e88185b1975130a77dde34b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:08,758 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:08,758 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 35698d8c3901cf8ffdf9dd340712898c, NAME => 'testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:13:08,759 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. service=AccessControlService 2024-12-11T06:13:08,759 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:13:08,759 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:08,759 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:08,759 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:08,759 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:08,760 INFO [StoreOpener-57e715a65e88185b1975130a77dde34b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 57e715a65e88185b1975130a77dde34b columnFamilyName cf 2024-12-11T06:13:08,761 INFO [StoreOpener-35698d8c3901cf8ffdf9dd340712898c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:08,761 DEBUG [StoreOpener-57e715a65e88185b1975130a77dde34b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:08,762 INFO [StoreOpener-57e715a65e88185b1975130a77dde34b-1 {}] regionserver.HStore(327): Store=57e715a65e88185b1975130a77dde34b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:13:08,762 INFO [StoreOpener-35698d8c3901cf8ffdf9dd340712898c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 35698d8c3901cf8ffdf9dd340712898c columnFamilyName cf 2024-12-11T06:13:08,762 DEBUG 
[RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:08,763 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:08,764 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:08,764 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:08,764 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:08,765 DEBUG [StoreOpener-35698d8c3901cf8ffdf9dd340712898c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:08,766 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:08,767 INFO [StoreOpener-35698d8c3901cf8ffdf9dd340712898c-1 {}] regionserver.HStore(327): Store=35698d8c3901cf8ffdf9dd340712898c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:13:08,769 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:08,769 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:08,770 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:13:08,770 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:08,770 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened 57e715a65e88185b1975130a77dde34b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63330775, jitterRate=-0.0562979131937027}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:13:08,770 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:08,771 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for 57e715a65e88185b1975130a77dde34b: Running coprocessor pre-open hook at 1733897588756Writing region info on filesystem at 1733897588756Initializing all the Stores at 1733897588757 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897588757Cleaning up temporary data from old regions at 1733897588764 (+7 ms)Running coprocessor post-open hooks at 1733897588770 (+6 ms)Region opened successfully at 1733897588771 (+1 ms) 2024-12-11T06:13:08,772 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:08,772 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:08,772 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b., pid=66, masterSystemTime=1733897588750 2024-12-11T06:13:08,774 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:08,775 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 2024-12-11T06:13:08,775 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 
2024-12-11T06:13:08,776 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=57e715a65e88185b1975130a77dde34b, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:13:08,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 57e715a65e88185b1975130a77dde34b, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:13:08,781 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:13:08,781 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened 35698d8c3901cf8ffdf9dd340712898c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65859625, jitterRate=-0.018615111708641052}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:13:08,781 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:08,781 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 35698d8c3901cf8ffdf9dd340712898c: Running coprocessor pre-open hook at 1733897588759Writing region info on filesystem at 1733897588759Initializing all the Stores at 1733897588760 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897588760Cleaning up temporary data from old regions at 1733897588772 (+12 ms)Running coprocessor post-open hooks at 1733897588781 (+9 ms)Region opened successfully at 1733897588781 2024-12-11T06:13:08,783 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c., pid=67, masterSystemTime=1733897588751 2024-12-11T06:13:08,790 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=65 2024-12-11T06:13:08,791 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=35698d8c3901cf8ffdf9dd340712898c, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:13:08,793 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=57e715a65e88185b1975130a77dde34b, ASSIGN in 352 msec 2024-12-11T06:13:08,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 
57e715a65e88185b1975130a77dde34b, server=b66397bb9319,35939,1733897428441 in 181 msec 2024-12-11T06:13:08,794 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 35698d8c3901cf8ffdf9dd340712898c, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:13:08,794 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:08,794 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:08,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=64 2024-12-11T06:13:08,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure 35698d8c3901cf8ffdf9dd340712898c, server=b66397bb9319,44539,1733897428597 in 196 msec 2024-12-11T06:13:08,801 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-12-11T06:13:08,801 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=35698d8c3901cf8ffdf9dd340712898c, ASSIGN in 360 msec 2024-12-11T06:13:08,804 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:13:08,804 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897588804"}]},"ts":"1733897588804"} 2024-12-11T06:13:08,807 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-11T06:13:08,808 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:13:08,809 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-11T06:13:08,812 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T06:13:08,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:08,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:08,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:08,815 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:08,817 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:08,817 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:08,817 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:08,818 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:08,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 484 msec 2024-12-11T06:13:08,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-11T06:13:08,967 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T06:13:08,968 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:08,971 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-11T06:13:08,971 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 
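At this point pid=63 is complete: CreateTableProcedure for 'testtb-testExportWithResetTtl' has moved through CREATE_TABLE_PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE and POST_OPERATION in 484 msec, producing two regions and a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'). A minimal client-side sketch of an equivalent create request through the public Admin API follows; it is an assumption-laden illustration (connection setup and error handling are simplified, and the mini-cluster wiring the test uses is omitted), not the harness's actual code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical standalone client; the test harness connects to its own mini-cluster instead.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)   // IS_MOB => 'true' in the logged descriptor
              .setMobThreshold(0L)   // MOB_THRESHOLD => '0'
              .setMaxVersions(1)     // VERSIONS => '1'
              .build())
          .build();
      // Split key "1" matches the two regions in the log (ENDKEY '1' / STARTKEY '1').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}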
2024-12-11T06:13:08,972 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:13:08,974 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:08,982 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:08,988 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45796, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:08,992 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:08,996 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T06:13:08,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897588996 (current time:1733897588996). 2024-12-11T06:13:08,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:13:08,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-11T06:13:08,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:13:08,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32b4062a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:08,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:08,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:08,999 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:08,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:08,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:09,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d05e507, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:09,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:09,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:09,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:09,001 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32840, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:09,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@542a9049, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:09,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:09,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:09,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:09,005 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38468, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:09,006 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
2024-12-11T06:13:09,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-11T06:13:09,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:13:09,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:13:09,007 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-11T06:13:09,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6de66d20, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-11T06:13:09,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id
2024-12-11T06:13:09,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-11T06:13:09,013 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0'
2024-12-11T06:13:09,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-11T06:13:09,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0"
2024-12-11T06:13:09,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fbf42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-11T06:13:09,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:09,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:09,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:09,015 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32858, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:09,015 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40b4d7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:09,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:09,016 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:09,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:09,018 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38474, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:09,020 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:13:09,022 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
2024-12-11T06:13:09,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-11T06:13:09,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:13:09,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:13:09,023 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-11T06:13:09,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA]
2024-12-11T06:13:09,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
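[Editor's note] The request being served above is the master-side half of a client snapshot call: { ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }. For orientation, a minimal client-side sketch of issuing such a request is shown below; it assumes an hbase-site.xml for a reachable cluster on the classpath, and the class name is a hypothetical illustration, not part of this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: cluster configuration is available on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot of an enabled table, mirroring the logged request.
      // The call blocks until the master-side SnapshotProcedure completes.
      admin.snapshot("emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH);
    }
  }
}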
2024-12-11T06:13:09,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T06:13:09,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-11T06:13:09,029 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:13:09,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-11T06:13:09,030 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:13:09,033 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:13:09,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741971_1147 (size=161) 2024-12-11T06:13:09,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741971_1147 (size=161) 2024-12-11T06:13:09,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741971_1147 (size=161) 2024-12-11T06:13:09,065 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:13:09,065 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 35698d8c3901cf8ffdf9dd340712898c}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 57e715a65e88185b1975130a77dde34b}] 2024-12-11T06:13:09,070 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:09,070 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:09,137 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-11T06:13:09,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-11T06:13:09,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-11T06:13:09,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 2024-12-11T06:13:09,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 57e715a65e88185b1975130a77dde34b: 2024-12-11T06:13:09,223 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-11T06:13:09,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-11T06:13:09,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:09,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:09,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:13:09,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for 35698d8c3901cf8ffdf9dd340712898c: 2024-12-11T06:13:09,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-11T06:13:09,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-11T06:13:09,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:09,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:13:09,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741972_1148 (size=68) 2024-12-11T06:13:09,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741972_1148 (size=68) 2024-12-11T06:13:09,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741972_1148 (size=68) 2024-12-11T06:13:09,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 2024-12-11T06:13:09,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-11T06:13:09,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-11T06:13:09,242 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:09,243 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:09,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741973_1149 (size=68) 2024-12-11T06:13:09,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741973_1149 (size=68) 2024-12-11T06:13:09,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741973_1149 (size=68) 2024-12-11T06:13:09,251 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 57e715a65e88185b1975130a77dde34b in 179 msec 2024-12-11T06:13:09,252 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 
2024-12-11T06:13:09,252 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-11T06:13:09,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-11T06:13:09,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:09,253 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:09,257 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=69, resume processing ppid=68 2024-12-11T06:13:09,257 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 35698d8c3901cf8ffdf9dd340712898c in 190 msec 2024-12-11T06:13:09,257 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:13:09,259 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:13:09,260 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
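[Editor's note] Once the SnapshotProcedure reaches SNAPSHOT_COMPLETE_SNAPSHOT a few records below, the snapshot becomes visible to clients. A hedged sketch of that client-side check follows; the helper class is hypothetical and assumes an already-open Connection.

import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;

final class ListSnapshotsSketch {
  // Returns true once a snapshot with the given name appears in the cluster listing.
  static boolean snapshotExists(Connection conn, String name) throws java.io.IOException {
    try (Admin admin = conn.getAdmin()) {
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      return snapshots.stream().anyMatch(s -> name.equals(s.getName()));
    }
  }
}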
2024-12-11T06:13:09,260 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:13:09,260 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:09,261 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T06:13:09,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741974_1150 (size=60) 2024-12-11T06:13:09,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741974_1150 (size=60) 2024-12-11T06:13:09,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741974_1150 (size=60) 2024-12-11T06:13:09,270 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:13:09,270 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-11T06:13:09,271 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-11T06:13:09,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741975_1151 (size=641) 2024-12-11T06:13:09,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741975_1151 (size=641) 2024-12-11T06:13:09,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741975_1151 (size=641) 2024-12-11T06:13:09,286 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:13:09,293 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:13:09,293 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-11T06:13:09,295 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:13:09,295 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-11T06:13:09,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 268 msec 2024-12-11T06:13:09,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-11T06:13:09,347 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T06:13:09,359 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44539 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:13:09,362 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:13:09,364 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:09,369 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-11T06:13:09,369 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 
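[Editor's note] The two "writing data to region ... with WAL disabled" warnings above come from the test loading rows with durability turned off. A minimal sketch of such a write is shown below, assuming an open Connection to this cluster; the class name and row/value bytes are hypothetical illustrations.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class SkipWalPutSketch {
  // Writes one cell without the write-ahead log, which is what triggers the warning above.
  static void putWithoutWal(Connection conn) throws java.io.IOException {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row-1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      put.setDurability(Durability.SKIP_WAL); // data may be lost on a crash, as the log warns
      table.put(put);
    }
  }
}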
2024-12-11T06:13:09,369 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:13:09,371 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:09,378 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:09,389 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:09,393 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T06:13:09,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897589393 (current time:1733897589393). 2024-12-11T06:13:09,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:13:09,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-11T06:13:09,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:13:09,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2862ef04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:09,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:09,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:09,395 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:09,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:09,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:09,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bfe898, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
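[Editor's note] The repeated "Scanning META starting at row=... stopping at row=..." records are the client locating this table's regions in hbase:meta, which is what produced "Found 2 regions for table testtb-testExportWithResetTtl" above. A hedged sketch of the equivalent public API call follows; the helper class is hypothetical.

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;

final class RegionListSketch {
  // Lists the table's regions through the Admin API instead of scanning hbase:meta directly.
  static void printRegions(Connection conn) throws java.io.IOException {
    try (Admin admin = conn.getAdmin()) {
      List<RegionInfo> regions = admin.getRegions(TableName.valueOf("testtb-testExportWithResetTtl"));
      for (RegionInfo region : regions) {
        System.out.println(region.getRegionNameAsString()); // e.g. testtb-...,,<timestamp>.<encoded name>.
      }
    }
  }
}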
2024-12-11T06:13:09,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:09,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:09,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:09,397 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32876, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:09,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@213523be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:09,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:09,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:09,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:09,400 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38476, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:09,405 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
2024-12-11T06:13:09,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-11T06:13:09,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:13:09,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:13:09,405 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-11T06:13:09,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dc7990a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-11T06:13:09,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id
2024-12-11T06:13:09,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-11T06:13:09,413 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0'
2024-12-11T06:13:09,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-11T06:13:09,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0"
2024-12-11T06:13:09,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b91e5bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-11T06:13:09,413 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:09,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:09,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:09,415 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32880, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:09,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27d9a1c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:09,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:09,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:09,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:09,420 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38480, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:09,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:13:09,425 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
2024-12-11T06:13:09,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-11T06:13:09,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:13:09,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:13:09,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA]
2024-12-11T06:13:09,425 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-11T06:13:09,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
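[Editor's note] The "Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA]" record above is the hbase:acl entry being copied into the snapshot description; RWXCA corresponds to READ, WRITE, EXEC, CREATE and ADMIN. A hedged sketch of the kind of grant that produces such an entry follows; it assumes the AccessController coprocessor is enabled (as it evidently is on this test cluster), and the helper class is hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

final class GrantSketch {
  // Grants the full RWXCA set on the table to user "jenkins".
  static void grantAll(Connection conn) throws Throwable {
    AccessControlClient.grant(conn,
        TableName.valueOf("testtb-testExportWithResetTtl"),
        "jenkins",
        (byte[]) null, (byte[]) null, // family / qualifier: null means the whole table
        Permission.Action.READ,
        Permission.Action.WRITE,
        Permission.Action.EXEC,
        Permission.Action.CREATE,
        Permission.Action.ADMIN);
  }
}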
2024-12-11T06:13:09,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-11T06:13:09,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-11T06:13:09,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-11T06:13:09,429 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:13:09,430 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:13:09,432 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:13:09,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741976_1152 (size=156) 2024-12-11T06:13:09,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741976_1152 (size=156) 2024-12-11T06:13:09,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741976_1152 (size=156) 2024-12-11T06:13:09,448 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:13:09,448 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 35698d8c3901cf8ffdf9dd340712898c}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 57e715a65e88185b1975130a77dde34b}] 2024-12-11T06:13:09,449 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:09,449 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:09,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-11T06:13:09,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-11T06:13:09,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-11T06:13:09,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 2024-12-11T06:13:09,602 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:09,602 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 57e715a65e88185b1975130a77dde34b 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-11T06:13:09,602 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing 35698d8c3901cf8ffdf9dd340712898c 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-11T06:13:09,621 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211d1fc5d158b4f4f9881f72d667d4b3974_35698d8c3901cf8ffdf9dd340712898c is 71, key is 07afe9e0e0e555957c55e34c1cc5335e/cf:q/1733897589359/Put/seqid=0 2024-12-11T06:13:09,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211173c744f4af143ee859a8072527f210f_57e715a65e88185b1975130a77dde34b is 71, key is 1139bc0cf819523f74531cd5346464b9/cf:q/1733897589361/Put/seqid=0 2024-12-11T06:13:09,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741977_1153 (size=5032) 2024-12-11T06:13:09,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741977_1153 (size=5032) 2024-12-11T06:13:09,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741977_1153 (size=5032) 2024-12-11T06:13:09,628 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:09,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741978_1154 (size=8241) 2024-12-11T06:13:09,630 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741978_1154 (size=8241) 2024-12-11T06:13:09,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741978_1154 (size=8241) 2024-12-11T06:13:09,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:09,635 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211d1fc5d158b4f4f9881f72d667d4b3974_35698d8c3901cf8ffdf9dd340712898c to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241211d1fc5d158b4f4f9881f72d667d4b3974_35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:09,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/.tmp/cf/7ec21f491b7b4ad7a6ba4c7ddcaf7f29, store: [table=testtb-testExportWithResetTtl family=cf region=35698d8c3901cf8ffdf9dd340712898c] 2024-12-11T06:13:09,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/.tmp/cf/7ec21f491b7b4ad7a6ba4c7ddcaf7f29 is 206, key is 0f4c14ce38a402390ec54f540790ee2e6/cf:q/1733897589359/Put/seqid=0 2024-12-11T06:13:09,638 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211173c744f4af143ee859a8072527f210f_57e715a65e88185b1975130a77dde34b to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241211173c744f4af143ee859a8072527f210f_57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:09,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/.tmp/cf/4982bac5bf0c4bddaa37f0818171ac75, store: [table=testtb-testExportWithResetTtl family=cf region=57e715a65e88185b1975130a77dde34b] 2024-12-11T06:13:09,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/.tmp/cf/4982bac5bf0c4bddaa37f0818171ac75 is 206, key is 1f6f2880a5ce526593b4d3f0038afa463/cf:q/1733897589361/Put/seqid=0 2024-12-11T06:13:09,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741979_1155 (size=5700) 2024-12-11T06:13:09,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741979_1155 (size=5700) 2024-12-11T06:13:09,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741979_1155 (size=5700) 2024-12-11T06:13:09,645 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/.tmp/cf/7ec21f491b7b4ad7a6ba4c7ddcaf7f29 2024-12-11T06:13:09,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741980_1156 (size=15055) 2024-12-11T06:13:09,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741980_1156 (size=15055) 2024-12-11T06:13:09,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741980_1156 (size=15055) 2024-12-11T06:13:09,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/.tmp/cf/7ec21f491b7b4ad7a6ba4c7ddcaf7f29 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/cf/7ec21f491b7b4ad7a6ba4c7ddcaf7f29 2024-12-11T06:13:09,654 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/.tmp/cf/4982bac5bf0c4bddaa37f0818171ac75 2024-12-11T06:13:09,658 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/cf/7ec21f491b7b4ad7a6ba4c7ddcaf7f29, entries=2, sequenceid=6, filesize=5.6 K 2024-12-11T06:13:09,659 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 35698d8c3901cf8ffdf9dd340712898c in 57ms, sequenceid=6, compaction requested=false 2024-12-11T06:13:09,659 
DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-11T06:13:09,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 35698d8c3901cf8ffdf9dd340712898c: 2024-12-11T06:13:09,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. for snaptb0-testExportWithResetTtl completed. 2024-12-11T06:13:09,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T06:13:09,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:09,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/cf/7ec21f491b7b4ad7a6ba4c7ddcaf7f29] hfiles 2024-12-11T06:13:09,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/cf/7ec21f491b7b4ad7a6ba4c7ddcaf7f29 for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T06:13:09,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/.tmp/cf/4982bac5bf0c4bddaa37f0818171ac75 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/cf/4982bac5bf0c4bddaa37f0818171ac75 2024-12-11T06:13:09,669 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/cf/4982bac5bf0c4bddaa37f0818171ac75, entries=48, sequenceid=6, filesize=14.7 K 2024-12-11T06:13:09,670 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 57e715a65e88185b1975130a77dde34b in 68ms, sequenceid=6, compaction requested=false 2024-12-11T06:13:09,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 
57e715a65e88185b1975130a77dde34b: 2024-12-11T06:13:09,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. for snaptb0-testExportWithResetTtl completed. 2024-12-11T06:13:09,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T06:13:09,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:09,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/cf/4982bac5bf0c4bddaa37f0818171ac75] hfiles 2024-12-11T06:13:09,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/cf/4982bac5bf0c4bddaa37f0818171ac75 for snapshot=snaptb0-testExportWithResetTtl 2024-12-11T06:13:09,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741981_1157 (size=107) 2024-12-11T06:13:09,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741981_1157 (size=107) 2024-12-11T06:13:09,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741981_1157 (size=107) 2024-12-11T06:13:09,678 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 
2024-12-11T06:13:09,678 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-11T06:13:09,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-11T06:13:09,679 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:09,679 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:09,682 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 35698d8c3901cf8ffdf9dd340712898c in 233 msec 2024-12-11T06:13:09,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741982_1158 (size=107) 2024-12-11T06:13:09,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741982_1158 (size=107) 2024-12-11T06:13:09,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741982_1158 (size=107) 2024-12-11T06:13:09,694 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 
2024-12-11T06:13:09,694 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-11T06:13:09,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-11T06:13:09,695 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:09,695 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:09,698 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-12-11T06:13:09,698 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 57e715a65e88185b1975130a77dde34b in 248 msec 2024-12-11T06:13:09,698 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:13:09,699 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:13:09,700 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
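The records above trace the server side of a FLUSH-type snapshot: each SnapshotRegionProcedure (pid=72 and pid=73) flushes its region, commits the flushed hfile, adds it to the per-region manifest, and then the procedure moves on to the MOB region. As a minimal client-side sketch (not the test's own source; connection setup and class name are illustrative), such a snapshot can be requested through the public Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH snapshot: regions are flushed first (the HRegion flush records above),
      // then the per-region manifests reference the resulting hfiles.
      admin.snapshot("snaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"), SnapshotType.FLUSH);
    }
  }
}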
2024-12-11T06:13:09,700 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:13:09,700 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:09,702 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241211173c744f4af143ee859a8072527f210f_57e715a65e88185b1975130a77dde34b, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241211d1fc5d158b4f4f9881f72d667d4b3974_35698d8c3901cf8ffdf9dd340712898c] hfiles 2024-12-11T06:13:09,702 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241211173c744f4af143ee859a8072527f210f_57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:09,702 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241211d1fc5d158b4f4f9881f72d667d4b3974_35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:09,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741983_1159 (size=291) 2024-12-11T06:13:09,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741983_1159 (size=291) 2024-12-11T06:13:09,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741983_1159 (size=291) 2024-12-11T06:13:09,716 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:13:09,716 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-11T06:13:09,717 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-11T06:13:09,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741984_1160 (size=951) 2024-12-11T06:13:09,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741984_1160 (size=951) 2024-12-11T06:13:09,727 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741984_1160 (size=951) 2024-12-11T06:13:09,730 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:13:09,736 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:13:09,736 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-11T06:13:09,738 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:13:09,738 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-11T06:13:09,739 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 312 msec 2024-12-11T06:13:09,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-11T06:13:09,746 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T06:13:09,748 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:13:09,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-11T06:13:09,750 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl 
execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:13:09,750 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-11T06:13:09,751 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:13:09,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-11T06:13:09,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741985_1161 (size=433) 2024-12-11T06:13:09,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741985_1161 (size=433) 2024-12-11T06:13:09,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741985_1161 (size=433) 2024-12-11T06:13:09,763 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 209a9442d0247b7a2fff3efae53ff5eb, NAME => 'testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:09,764 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 24bf8a85cf0b45ab40a127b62fbd902d, NAME => 'testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:09,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741986_1162 (size=58) 2024-12-11T06:13:09,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741987_1163 (size=58) 2024-12-11T06:13:09,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741986_1162 (size=58) 2024-12-11T06:13:09,775 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741987_1163 (size=58) 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:09,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741986_1162 (size=58) 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 209a9442d0247b7a2fff3efae53ff5eb, disabling compactions & flushes 2024-12-11T06:13:09,775 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:09,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741987_1163 (size=58) 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. after waiting 0 ms 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:09,775 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 209a9442d0247b7a2fff3efae53ff5eb: Waiting for close lock at 1733897589775Disabling compacts and flushes for region at 1733897589775Disabling writes for close at 1733897589775Writing region close event to WAL at 1733897589775Closed at 1733897589775 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 24bf8a85cf0b45ab40a127b62fbd902d, disabling compactions & flushes 2024-12-11T06:13:09,775 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 
after waiting 0 ms 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 2024-12-11T06:13:09,775 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 2024-12-11T06:13:09,775 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 24bf8a85cf0b45ab40a127b62fbd902d: Waiting for close lock at 1733897589775Disabling compacts and flushes for region at 1733897589775Disabling writes for close at 1733897589775Writing region close event to WAL at 1733897589775Closed at 1733897589775 2024-12-11T06:13:09,777 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:13:09,777 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733897589777"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897589777"}]},"ts":"1733897589777"} 2024-12-11T06:13:09,777 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733897589777"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897589777"}]},"ts":"1733897589777"} 2024-12-11T06:13:09,779 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
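The CreateTableProcedure records above write the filesystem layout and add two regions to hbase:meta for testExportWithResetTtl, using a descriptor with a MOB-enabled family ('cf', IS_MOB => 'true', MOB_THRESHOLD => '0') and a split point at '1'. A minimal sketch, assuming a standard HBase 2.x client and with invented method/class names, of creating an equivalent table:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

class CreateMobTableSketch {
  static void createMobTable(Admin admin) throws IOException {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testExportWithResetTtl"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every value is stored as a MOB cell
                .setMaxVersions(1)     // VERSIONS => '1'
                .build());
    byte[][] splitKeys = { Bytes.toBytes("1") };  // yields the two regions ('', '1') and ('1', '')
    admin.createTable(table.build(), splitKeys);
  }
}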
2024-12-11T06:13:09,780 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:13:09,780 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897589780"}]},"ts":"1733897589780"} 2024-12-11T06:13:09,782 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-11T06:13:09,783 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:13:09,784 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:13:09,784 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:13:09,784 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:13:09,784 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:13:09,784 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:13:09,784 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:13:09,784 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:13:09,784 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:13:09,784 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:13:09,784 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:13:09,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=209a9442d0247b7a2fff3efae53ff5eb, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=24bf8a85cf0b45ab40a127b62fbd902d, ASSIGN}] 2024-12-11T06:13:09,786 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=24bf8a85cf0b45ab40a127b62fbd902d, ASSIGN 2024-12-11T06:13:09,786 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=209a9442d0247b7a2fff3efae53ff5eb, ASSIGN 2024-12-11T06:13:09,786 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=24bf8a85cf0b45ab40a127b62fbd902d, ASSIGN; state=OFFLINE, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:13:09,786 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=209a9442d0247b7a2fff3efae53ff5eb, ASSIGN; state=OFFLINE, location=b66397bb9319,44539,1733897428597; forceNewPlan=false, retain=false 2024-12-11T06:13:09,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-11T06:13:09,937 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-11T06:13:09,937 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=24bf8a85cf0b45ab40a127b62fbd902d, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:13:09,937 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=209a9442d0247b7a2fff3efae53ff5eb, regionState=OPENING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:13:09,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=24bf8a85cf0b45ab40a127b62fbd902d, ASSIGN because future has completed 2024-12-11T06:13:09,940 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 24bf8a85cf0b45ab40a127b62fbd902d, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:13:09,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=209a9442d0247b7a2fff3efae53ff5eb, ASSIGN because future has completed 2024-12-11T06:13:09,941 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 209a9442d0247b7a2fff3efae53ff5eb, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:13:10,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-11T06:13:10,094 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 2024-12-11T06:13:10,095 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => 24bf8a85cf0b45ab40a127b62fbd902d, NAME => 'testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:13:10,095 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. service=AccessControlService 2024-12-11T06:13:10,095 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:13:10,096 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,096 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:10,096 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,096 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,096 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:10,096 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 209a9442d0247b7a2fff3efae53ff5eb, NAME => 'testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:13:10,096 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. service=AccessControlService 2024-12-11T06:13:10,097 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:13:10,097 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,097 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:10,097 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,097 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,097 INFO [StoreOpener-24bf8a85cf0b45ab40a127b62fbd902d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,098 INFO [StoreOpener-209a9442d0247b7a2fff3efae53ff5eb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,099 INFO [StoreOpener-24bf8a85cf0b45ab40a127b62fbd902d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 24bf8a85cf0b45ab40a127b62fbd902d columnFamilyName cf 2024-12-11T06:13:10,099 INFO [StoreOpener-209a9442d0247b7a2fff3efae53ff5eb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 209a9442d0247b7a2fff3efae53ff5eb columnFamilyName cf 2024-12-11T06:13:10,100 DEBUG [StoreOpener-24bf8a85cf0b45ab40a127b62fbd902d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:10,100 DEBUG [StoreOpener-209a9442d0247b7a2fff3efae53ff5eb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:10,100 INFO [StoreOpener-209a9442d0247b7a2fff3efae53ff5eb-1 {}] regionserver.HStore(327): Store=209a9442d0247b7a2fff3efae53ff5eb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:13:10,100 INFO [StoreOpener-24bf8a85cf0b45ab40a127b62fbd902d-1 {}] regionserver.HStore(327): Store=24bf8a85cf0b45ab40a127b62fbd902d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:13:10,101 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,101 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,102 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,102 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,102 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,102 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,103 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,103 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,103 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,103 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,104 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] 
regionserver.HRegion(1093): writing seq id for 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,105 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,107 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:13:10,107 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:13:10,107 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 209a9442d0247b7a2fff3efae53ff5eb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63535719, jitterRate=-0.053244009613990784}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:13:10,108 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,108 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened 24bf8a85cf0b45ab40a127b62fbd902d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68495342, jitterRate=0.02066013216972351}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:13:10,108 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,108 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for 24bf8a85cf0b45ab40a127b62fbd902d: Running coprocessor pre-open hook at 1733897590096Writing region info on filesystem at 1733897590096Initializing all the Stores at 1733897590097 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897590097Cleaning up temporary data from old regions at 1733897590103 (+6 ms)Running coprocessor post-open hooks at 1733897590108 (+5 ms)Region opened successfully at 1733897590108 2024-12-11T06:13:10,108 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 209a9442d0247b7a2fff3efae53ff5eb: Running coprocessor pre-open hook at 1733897590097Writing region info on filesystem at 1733897590097Initializing all the Stores at 1733897590098 
(+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897590098Cleaning up temporary data from old regions at 1733897590103 (+5 ms)Running coprocessor post-open hooks at 1733897590108 (+5 ms)Region opened successfully at 1733897590108 2024-12-11T06:13:10,109 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d., pid=77, masterSystemTime=1733897590091 2024-12-11T06:13:10,109 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb., pid=78, masterSystemTime=1733897590093 2024-12-11T06:13:10,111 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:10,111 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:10,112 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=209a9442d0247b7a2fff3efae53ff5eb, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:13:10,112 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 2024-12-11T06:13:10,113 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 
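The OpenRegionProcedure records above (pid=77 and pid=78) show both regions of testExportWithResetTtl coming online on their assigned region servers. As a rough illustration only (standard client API, method name invented here), a client can confirm the assignment through a RegionLocator:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

class RegionLocationSketch {
  static void printRegionLocations(Connection conn) throws IOException {
    TableName tn = TableName.valueOf("testExportWithResetTtl");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Encoded region name and hosting server, matching the assignment records above.
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}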
2024-12-11T06:13:10,114 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=24bf8a85cf0b45ab40a127b62fbd902d, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:13:10,114 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 209a9442d0247b7a2fff3efae53ff5eb, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:13:10,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 24bf8a85cf0b45ab40a127b62fbd902d, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:13:10,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=75 2024-12-11T06:13:10,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 209a9442d0247b7a2fff3efae53ff5eb, server=b66397bb9319,44539,1733897428597 in 174 msec 2024-12-11T06:13:10,118 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-12-11T06:13:10,118 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure 24bf8a85cf0b45ab40a127b62fbd902d, server=b66397bb9319,35939,1733897428441 in 176 msec 2024-12-11T06:13:10,119 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=209a9442d0247b7a2fff3efae53ff5eb, ASSIGN in 333 msec 2024-12-11T06:13:10,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=76, resume processing ppid=74 2024-12-11T06:13:10,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=24bf8a85cf0b45ab40a127b62fbd902d, ASSIGN in 334 msec 2024-12-11T06:13:10,121 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:13:10,121 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897590121"}]},"ts":"1733897590121"} 2024-12-11T06:13:10,123 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-11T06:13:10,123 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:13:10,124 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-11T06:13:10,126 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T06:13:10,128 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:10,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:10,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:10,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:10,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:10,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:10,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:10,135 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:10,136 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:10,136 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:10,136 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:10,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 386 msec 2024-12-11T06:13:10,136 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:10,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=74 2024-12-11T06:13:10,377 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-11T06:13:10,377 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:10,380 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-11T06:13:10,380 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:10,381 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:13:10,383 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:10,387 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:10,392 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:10,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44539 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:13:10,407 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:13:10,409 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:10,411 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-11T06:13:10,411 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 
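The PermissionStorage and ZKPermissionWatcher records a little earlier show the new table's ACL ('jenkins: RWXCA') being written and pushed to every region server watcher. A grant with the same effect could be issued through AccessControlClient, as sketched below (assumed standard API usage, not taken from the test source):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

class GrantAclSketch {
  static void grantAll(Connection conn) throws Throwable {
    AccessControlClient.grant(conn, TableName.valueOf("testExportWithResetTtl"), "jenkins",
        null, null,  // no family/qualifier restriction: table-wide grant
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);  // RWXCA
  }
}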
2024-12-11T06:13:10,412 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:13:10,413 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:10,418 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:10,423 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-11T06:13:10,426 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-11T06:13:10,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897590426 (current time:1733897590426). 2024-12-11T06:13:10,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-11T06:13:10,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:13:10,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4770af13, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:10,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:10,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:10,428 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:10,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:10,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:10,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71fe6d4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:10,428 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:10,428 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:10,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:10,429 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32898, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:10,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2368919f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:10,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:10,431 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:10,432 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:10,432 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38484, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:10,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
2024-12-11T06:13:10,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:13:10,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:10,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:10,434 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:13:10,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59cd5c38, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:10,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:10,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:10,436 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:10,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:10,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:10,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b859f82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:10,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:10,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:10,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:10,437 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32912, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:10,438 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49d63ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:10,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:10,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:10,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:10,441 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38494, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:10,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:13:10,443 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
2024-12-11T06:13:10,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:13:10,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:10,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:10,444 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:13:10,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-11T06:13:10,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
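The entries above show the master accepting a FLUSH snapshot request named snaptb-testExportWithResetTtl with ttl=100000, reading the table ACL, and deciding "No existing snapshot, attempting snapshot...". For context, a minimal client-side sketch of issuing such a request through the standard Admin API follows; only the snapshot name and table name are taken from the log, and the TTL handling is omitted because the exact call the test makes is not visible here:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotRequestExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master for a flush-type snapshot; the master then runs the
          // SnapshotProcedure seen above (pid=79), flushing each region before snapshotting it.
          admin.snapshot("snaptb-testExportWithResetTtl", TableName.valueOf("testExportWithResetTtl"));
        }
      }
    }
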
2024-12-11T06:13:10,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-11T06:13:10,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-11T06:13:10,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-11T06:13:10,447 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:13:10,448 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:13:10,451 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:13:10,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741988_1164 (size=143) 2024-12-11T06:13:10,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741988_1164 (size=143) 2024-12-11T06:13:10,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741988_1164 (size=143) 2024-12-11T06:13:10,460 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:13:10,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 209a9442d0247b7a2fff3efae53ff5eb}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 24bf8a85cf0b45ab40a127b62fbd902d}] 2024-12-11T06:13:10,461 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,461 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=79 2024-12-11T06:13:10,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-11T06:13:10,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-11T06:13:10,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:10,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 2024-12-11T06:13:10,614 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 209a9442d0247b7a2fff3efae53ff5eb 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-11T06:13:10,614 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing 24bf8a85cf0b45ab40a127b62fbd902d 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-11T06:13:10,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412112352a3d1180946df982f16bfc6857614_209a9442d0247b7a2fff3efae53ff5eb is 71, key is 0c650d13478f1982644c3024d716ee44/cf:q/1733897590404/Put/seqid=0 2024-12-11T06:13:10,634 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211f41d4bee9da44f3f829cebc235dfb362_24bf8a85cf0b45ab40a127b62fbd902d is 71, key is 127cc7f66159fc89d30a4ae19112be1c/cf:q/1733897590407/Put/seqid=0 2024-12-11T06:13:10,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741989_1165 (size=5032) 2024-12-11T06:13:10,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741989_1165 (size=5032) 2024-12-11T06:13:10,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741989_1165 (size=5032) 2024-12-11T06:13:10,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741990_1166 (size=8241) 2024-12-11T06:13:10,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741990_1166 (size=8241) 2024-12-11T06:13:10,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:10,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741990_1166 (size=8241) 2024-12-11T06:13:10,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:10,647 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211f41d4bee9da44f3f829cebc235dfb362_24bf8a85cf0b45ab40a127b62fbd902d to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241211f41d4bee9da44f3f829cebc235dfb362_24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,647 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412112352a3d1180946df982f16bfc6857614_209a9442d0247b7a2fff3efae53ff5eb to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e202412112352a3d1180946df982f16bfc6857614_209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/.tmp/cf/63c180d6e96e4780965dcd34297b299b, store: [table=testExportWithResetTtl family=cf region=24bf8a85cf0b45ab40a127b62fbd902d] 2024-12-11T06:13:10,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/.tmp/cf/93d6ba78b5c04290a28a34fb6563e4f4, store: [table=testExportWithResetTtl family=cf region=209a9442d0247b7a2fff3efae53ff5eb] 2024-12-11T06:13:10,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/.tmp/cf/63c180d6e96e4780965dcd34297b299b is 199, key is 13d648549fa845f7ea885e36370464143/cf:q/1733897590407/Put/seqid=0 2024-12-11T06:13:10,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/.tmp/cf/93d6ba78b5c04290a28a34fb6563e4f4 is 199, key is 08d5481b36bf9bf4a08d22b016bac3fe9/cf:q/1733897590404/Put/seqid=0 2024-12-11T06:13:10,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741991_1167 (size=14712) 2024-12-11T06:13:10,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741991_1167 (size=14712) 2024-12-11T06:13:10,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741992_1168 (size=5679) 2024-12-11T06:13:10,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741991_1167 (size=14712) 2024-12-11T06:13:10,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741992_1168 (size=5679) 2024-12-11T06:13:10,659 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/.tmp/cf/63c180d6e96e4780965dcd34297b299b 2024-12-11T06:13:10,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741992_1168 (size=5679) 2024-12-11T06:13:10,660 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/.tmp/cf/93d6ba78b5c04290a28a34fb6563e4f4 2024-12-11T06:13:10,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/.tmp/cf/63c180d6e96e4780965dcd34297b299b as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/cf/63c180d6e96e4780965dcd34297b299b 2024-12-11T06:13:10,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/.tmp/cf/93d6ba78b5c04290a28a34fb6563e4f4 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/cf/93d6ba78b5c04290a28a34fb6563e4f4 2024-12-11T06:13:10,672 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/cf/63c180d6e96e4780965dcd34297b299b, entries=48, sequenceid=5, filesize=14.4 K 2024-12-11T06:13:10,672 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/cf/93d6ba78b5c04290a28a34fb6563e4f4, entries=2, sequenceid=5, filesize=5.5 K 2024-12-11T06:13:10,673 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 209a9442d0247b7a2fff3efae53ff5eb in 59ms, sequenceid=5, compaction requested=false 2024-12-11T06:13:10,673 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 24bf8a85cf0b45ab40a127b62fbd902d in 59ms, sequenceid=5, compaction requested=false 2024-12-11T06:13:10,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-11T06:13:10,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for 24bf8a85cf0b45ab40a127b62fbd902d: 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 209a9442d0247b7a2fff3efae53ff5eb: 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. for snaptb-testExportWithResetTtl completed. 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. for snaptb-testExportWithResetTtl completed. 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/cf/63c180d6e96e4780965dcd34297b299b] hfiles 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/cf/63c180d6e96e4780965dcd34297b299b for snapshot=snaptb-testExportWithResetTtl 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/cf/93d6ba78b5c04290a28a34fb6563e4f4] hfiles 2024-12-11T06:13:10,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/cf/93d6ba78b5c04290a28a34fb6563e4f4 for snapshot=snaptb-testExportWithResetTtl 2024-12-11T06:13:10,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741994_1170 (size=100) 2024-12-11T06:13:10,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741994_1170 (size=100) 2024-12-11T06:13:10,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741994_1170 (size=100) 2024-12-11T06:13:10,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741993_1169 (size=100) 2024-12-11T06:13:10,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 
2024-12-11T06:13:10,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-11T06:13:10,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741993_1169 (size=100) 2024-12-11T06:13:10,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-11T06:13:10,684 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741993_1169 (size=100) 2024-12-11T06:13:10,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 2024-12-11T06:13:10,685 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-11T06:13:10,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-11T06:13:10,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,686 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,687 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 209a9442d0247b7a2fff3efae53ff5eb in 225 msec 2024-12-11T06:13:10,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=81, resume processing ppid=79 2024-12-11T06:13:10,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 24bf8a85cf0b45ab40a127b62fbd902d in 226 msec 2024-12-11T06:13:10,688 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:13:10,689 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:13:10,690 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): 
Storing region-info for snapshot. 2024-12-11T06:13:10,690 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:13:10,690 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:10,691 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241211f41d4bee9da44f3f829cebc235dfb362_24bf8a85cf0b45ab40a127b62fbd902d, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e202412112352a3d1180946df982f16bfc6857614_209a9442d0247b7a2fff3efae53ff5eb] hfiles 2024-12-11T06:13:10,691 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241211f41d4bee9da44f3f829cebc235dfb362_24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:10,691 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e202412112352a3d1180946df982f16bfc6857614_209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:10,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741995_1171 (size=284) 2024-12-11T06:13:10,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741995_1171 (size=284) 2024-12-11T06:13:10,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741995_1171 (size=284) 2024-12-11T06:13:10,699 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:13:10,699 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-11T06:13:10,700 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-11T06:13:10,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741996_1172 (size=923) 2024-12-11T06:13:10,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741996_1172 (size=923) 2024-12-11T06:13:10,710 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741996_1172 (size=923) 2024-12-11T06:13:10,713 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:13:10,719 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:13:10,720 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-11T06:13:10,721 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:13:10,721 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-11T06:13:10,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 276 msec 2024-12-11T06:13:10,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-11T06:13:10,767 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-11T06:13:10,777 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897590777 2024-12-11T06:13:10,777 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:42173, tgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897590777, rawTgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897590777, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:10,808 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:10,808 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins 
(auth:SIMPLE)]], outputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897590777, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897590777/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-11T06:13:10,810 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T06:13:10,816 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897590777/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-11T06:13:10,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741997_1173 (size=143) 2024-12-11T06:13:10,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741997_1173 (size=143) 2024-12-11T06:13:10,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741997_1173 (size=143) 2024-12-11T06:13:10,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741998_1174 (size=923) 2024-12-11T06:13:10,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741998_1174 (size=923) 2024-12-11T06:13:10,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741998_1174 (size=923) 2024-12-11T06:13:10,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741999_1175 (size=141) 2024-12-11T06:13:10,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741999_1175 (size=141) 2024-12-11T06:13:10,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741999_1175 (size=141) 2024-12-11T06:13:10,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:10,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:10,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:11,923 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-17696458019823554821.jar 2024-12-11T06:13:11,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:11,924 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:11,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-8146165905226168615.jar 2024-12-11T06:13:11,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:11,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:11,993 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:11,993 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:11,993 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:11,993 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:11,994 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T06:13:11,994 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T06:13:11,994 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T06:13:11,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T06:13:11,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T06:13:11,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T06:13:11,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T06:13:11,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T06:13:11,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T06:13:11,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T06:13:11,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T06:13:11,997 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:13:11,997 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:13:11,997 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
2024-12-11T06:13:11,997 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:13:11,998 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:13:11,998 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:13:11,998 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:13:12,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742000_1176 (size=131440) 2024-12-11T06:13:12,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742000_1176 (size=131440) 2024-12-11T06:13:12,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742000_1176 (size=131440) 2024-12-11T06:13:12,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742001_1177 (size=4188619) 2024-12-11T06:13:12,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742001_1177 (size=4188619) 2024-12-11T06:13:12,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742001_1177 (size=4188619) 2024-12-11T06:13:12,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742002_1178 (size=1323991) 2024-12-11T06:13:12,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742002_1178 (size=1323991) 2024-12-11T06:13:12,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742002_1178 (size=1323991) 2024-12-11T06:13:12,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742003_1179 (size=903542) 2024-12-11T06:13:12,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742003_1179 (size=903542) 2024-12-11T06:13:12,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742003_1179 (size=903542) 2024-12-11T06:13:12,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742004_1180 (size=8360360) 
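From 06:13:10,777 onward the test drives org.apache.hadoop.hbase.snapshot.ExportSnapshot: it verifies the source snapshot, copies the snapshot manifest into .hbase-snapshot/.tmp under the export destination, and then stages the HBase/Hadoop dependency jars for the MapReduce copy job (the TableMapReduceUtil "For class X, using jar Y" entries and the block-report bursts above). A hedged sketch of invoking the same tool directly is shown below; the destination path is copied from the log, while the option names are the commonly documented ones and may vary by HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Command-line equivalent (assumed option set):
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb-testExportWithResetTtl -copy-to <destination>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897590777"
        });
        System.exit(rc);
      }
    }
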
2024-12-11T06:13:12,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742004_1180 (size=8360360) 2024-12-11T06:13:12,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742004_1180 (size=8360360) 2024-12-11T06:13:12,144 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0002_000001 (auth:SIMPLE) from 127.0.0.1:50578 2024-12-11T06:13:12,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742005_1181 (size=443172) 2024-12-11T06:13:12,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742005_1181 (size=443172) 2024-12-11T06:13:12,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742005_1181 (size=443172) 2024-12-11T06:13:12,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742006_1182 (size=1877034) 2024-12-11T06:13:12,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742006_1182 (size=1877034) 2024-12-11T06:13:12,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742006_1182 (size=1877034) 2024-12-11T06:13:12,166 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000001/launch_container.sh] 2024-12-11T06:13:12,166 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000001/container_tokens] 2024-12-11T06:13:12,166 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0002/container_1733897436125_0002_01_000001/sysfs] 2024-12-11T06:13:12,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742007_1183 (size=77835) 2024-12-11T06:13:12,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742007_1183 (size=77835) 2024-12-11T06:13:12,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742007_1183 (size=77835) 2024-12-11T06:13:12,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to 
blk_1073742008_1184 (size=30949) 2024-12-11T06:13:12,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742008_1184 (size=30949) 2024-12-11T06:13:12,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742008_1184 (size=30949) 2024-12-11T06:13:12,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742009_1185 (size=1597213) 2024-12-11T06:13:12,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742009_1185 (size=1597213) 2024-12-11T06:13:12,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742009_1185 (size=1597213) 2024-12-11T06:13:12,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742010_1186 (size=4695811) 2024-12-11T06:13:12,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742010_1186 (size=4695811) 2024-12-11T06:13:12,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742010_1186 (size=4695811) 2024-12-11T06:13:12,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742011_1187 (size=232957) 2024-12-11T06:13:12,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742011_1187 (size=232957) 2024-12-11T06:13:12,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742011_1187 (size=232957) 2024-12-11T06:13:12,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742012_1188 (size=127628) 2024-12-11T06:13:12,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742012_1188 (size=127628) 2024-12-11T06:13:12,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742012_1188 (size=127628) 2024-12-11T06:13:12,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742013_1189 (size=20406) 2024-12-11T06:13:12,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742013_1189 (size=20406) 2024-12-11T06:13:12,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742013_1189 (size=20406) 2024-12-11T06:13:12,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742014_1190 (size=6426220) 2024-12-11T06:13:12,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742014_1190 (size=6426220) 2024-12-11T06:13:12,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is 
added to blk_1073742014_1190 (size=6426220) 2024-12-11T06:13:12,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742015_1191 (size=5175431) 2024-12-11T06:13:12,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742015_1191 (size=5175431) 2024-12-11T06:13:12,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742015_1191 (size=5175431) 2024-12-11T06:13:12,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742016_1192 (size=217634) 2024-12-11T06:13:12,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742016_1192 (size=217634) 2024-12-11T06:13:12,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742016_1192 (size=217634) 2024-12-11T06:13:12,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742017_1193 (size=1832290) 2024-12-11T06:13:12,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742017_1193 (size=1832290) 2024-12-11T06:13:12,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742017_1193 (size=1832290) 2024-12-11T06:13:12,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742018_1194 (size=322274) 2024-12-11T06:13:12,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742018_1194 (size=322274) 2024-12-11T06:13:12,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742018_1194 (size=322274) 2024-12-11T06:13:12,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742019_1195 (size=503880) 2024-12-11T06:13:12,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742019_1195 (size=503880) 2024-12-11T06:13:12,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742019_1195 (size=503880) 2024-12-11T06:13:12,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742020_1196 (size=29229) 2024-12-11T06:13:12,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742020_1196 (size=29229) 2024-12-11T06:13:12,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742020_1196 (size=29229) 2024-12-11T06:13:12,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742021_1197 (size=24096) 2024-12-11T06:13:12,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36519 is added to blk_1073742021_1197 (size=24096) 2024-12-11T06:13:12,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742021_1197 (size=24096) 2024-12-11T06:13:12,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742022_1198 (size=111872) 2024-12-11T06:13:12,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742022_1198 (size=111872) 2024-12-11T06:13:12,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742022_1198 (size=111872) 2024-12-11T06:13:12,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742023_1199 (size=45609) 2024-12-11T06:13:12,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742023_1199 (size=45609) 2024-12-11T06:13:12,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742023_1199 (size=45609) 2024-12-11T06:13:12,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742024_1200 (size=136454) 2024-12-11T06:13:12,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742024_1200 (size=136454) 2024-12-11T06:13:12,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742024_1200 (size=136454) 2024-12-11T06:13:12,384 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
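The JobResourceUploader(481) warning at the end of the block above ("No job jar file set. User classes may not be found.") points at the job-jar configuration it expects. A hedged sketch of the usual remedy follows; the class and path names are placeholders, not from the test code.

```java
// Hedged sketch: addressing the "No job jar file set" warning by telling the
// job which jar carries the user classes, as the warning itself suggests.
import org.apache.hadoop.mapreduce.Job;

public class JobJarExample {
  public static void configure(Job job) {
    // Either name the jar explicitly (path is illustrative)...
    // job.setJar("/path/to/my-job.jar");
    // ...or let Hadoop find the jar that contains a known user class:
    job.setJarByClass(JobJarExample.class);
  }
}
```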
2024-12-11T06:13:12,386 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-11T06:13:12,389 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.4 K 2024-12-11T06:13:12,389 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-11T06:13:12,389 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.5 K 2024-12-11T06:13:12,389 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-12-11T06:13:12,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742025_1201 (size=995) 2024-12-11T06:13:12,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742025_1201 (size=995) 2024-12-11T06:13:12,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742025_1201 (size=995) 2024-12-11T06:13:12,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742026_1202 (size=35) 2024-12-11T06:13:12,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742026_1202 (size=35) 2024-12-11T06:13:12,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742026_1202 (size=35) 2024-12-11T06:13:12,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742027_1203 (size=303992) 2024-12-11T06:13:12,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742027_1203 (size=303992) 2024-12-11T06:13:12,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742027_1203 (size=303992) 2024-12-11T06:13:12,441 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:13:12,441 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T06:13:12,937 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0003_000001 (auth:SIMPLE) from 127.0.0.1:44090 2024-12-11T06:13:13,531 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:13:18,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-11T06:13:18,055 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-11T06:13:18,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-11T06:13:18,056 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-11T06:13:18,683 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0003_000001 (auth:SIMPLE) from 127.0.0.1:33222 2024-12-11T06:13:18,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742028_1204 (size=349690) 2024-12-11T06:13:18,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742028_1204 (size=349690) 2024-12-11T06:13:18,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742028_1204 (size=349690) 2024-12-11T06:13:20,997 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0003_000001 (auth:SIMPLE) from 127.0.0.1:44104 2024-12-11T06:13:20,997 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0003_000001 (auth:SIMPLE) from 127.0.0.1:34638 2024-12-11T06:13:21,784 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0003_000001 (auth:SIMPLE) from 127.0.0.1:44118 2024-12-11T06:13:21,789 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0003_000001 (auth:SIMPLE) from 127.0.0.1:34644 2024-12-11T06:13:24,157 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0003_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T06:13:26,292 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T06:13:28,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742029_1205 (size=14712) 2024-12-11T06:13:28,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742029_1205 (size=14712) 2024-12-11T06:13:28,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742029_1205 (size=14712) 2024-12-11T06:13:28,334 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000002/launch_container.sh] 2024-12-11T06:13:28,334 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000002/container_tokens] 2024-12-11T06:13:28,334 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000002/sysfs] 2024-12-11T06:13:29,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742031_1207 (size=5679) 2024-12-11T06:13:29,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742031_1207 (size=5679) 2024-12-11T06:13:29,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742031_1207 (size=5679) 2024-12-11T06:13:29,604 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000004/launch_container.sh] 2024-12-11T06:13:29,604 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000004/container_tokens] 2024-12-11T06:13:29,604 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000004/sysfs] 2024-12-11T06:13:30,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742032_1208 (size=5032) 2024-12-11T06:13:30,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742032_1208 (size=5032) 2024-12-11T06:13:30,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742032_1208 (size=5032) 2024-12-11T06:13:30,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742033_1209 (size=8241) 2024-12-11T06:13:30,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742033_1209 (size=8241) 2024-12-11T06:13:30,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742033_1209 (size=8241) 2024-12-11T06:13:30,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742030_1206 (size=31712) 2024-12-11T06:13:30,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742030_1206 (size=31712) 2024-12-11T06:13:30,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742030_1206 (size=31712) 2024-12-11T06:13:30,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742034_1210 (size=462) 2024-12-11T06:13:30,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742034_1210 (size=462) 2024-12-11T06:13:30,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742034_1210 (size=462) 2024-12-11T06:13:31,012 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000003/launch_container.sh] 2024-12-11T06:13:31,012 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000003/container_tokens] 2024-12-11T06:13:31,012 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000003/sysfs] 2024-12-11T06:13:31,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742035_1211 (size=31712) 2024-12-11T06:13:31,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742035_1211 (size=31712) 2024-12-11T06:13:31,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742035_1211 (size=31712) 2024-12-11T06:13:31,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742036_1212 (size=349690) 2024-12-11T06:13:31,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742036_1212 (size=349690) 2024-12-11T06:13:31,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742036_1212 (size=349690) 2024-12-11T06:13:31,148 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0003_000001 (auth:SIMPLE) from 127.0.0.1:55204 2024-12-11T06:13:31,849 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 24bf8a85cf0b45ab40a127b62fbd902d changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:13:31,849 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 57e715a65e88185b1975130a77dde34b changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:13:31,849 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 209a9442d0247b7a2fff3efae53ff5eb changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:13:31,849 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 35698d8c3901cf8ffdf9dd340712898c changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:13:32,726 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T06:13:32,727 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
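The export itself runs between the "Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list" / "export split=..." lines and the "Finalize the Snapshot Export" step above: the tool plans four splits, runs them as a MapReduce job on the mini cluster, then verifies the copied snapshot. The sketch below shows one way to drive the same tool programmatically; it is not the test's own harness, the destination URI is a placeholder, and the option names should be checked against the HBase version in use.

```java
// Minimal sketch, assuming current ExportSnapshot option names: reproduce the
// export that generated the "export split=N size=..." planning lines above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb-testExportWithResetTtl", // snapshot named in the log
        "--copy-to", "hdfs://dest-cluster:8020/hbase", // placeholder target URI
        "--mappers", "4"                               // the log planned 4 splits
    });
    System.exit(rc);
  }
}
```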
2024-12-11T06:13:32,800 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-12-11T06:13:32,800 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T06:13:32,806 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T06:13:32,806 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-11T06:13:32,810 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-11T06:13:32,810 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-11T06:13:32,810 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897590777/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897590777/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-11T06:13:32,811 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897590777/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-11T06:13:32,811 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897590777/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-11T06:13:32,850 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-11T06:13:32,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-12-11T06:13:32,859 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897612859"}]},"ts":"1733897612859"} 2024-12-11T06:13:32,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-11T06:13:32,863 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-11T06:13:32,863 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-11T06:13:32,864 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-11T06:13:32,867 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=209a9442d0247b7a2fff3efae53ff5eb, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=24bf8a85cf0b45ab40a127b62fbd902d, UNASSIGN}] 2024-12-11T06:13:32,868 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=209a9442d0247b7a2fff3efae53ff5eb, UNASSIGN 2024-12-11T06:13:32,869 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=24bf8a85cf0b45ab40a127b62fbd902d, UNASSIGN 2024-12-11T06:13:32,870 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=209a9442d0247b7a2fff3efae53ff5eb, regionState=CLOSING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:13:32,871 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=24bf8a85cf0b45ab40a127b62fbd902d, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:13:32,875 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=209a9442d0247b7a2fff3efae53ff5eb, UNASSIGN because future has completed 2024-12-11T06:13:32,875 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:13:32,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 209a9442d0247b7a2fff3efae53ff5eb, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:13:32,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=24bf8a85cf0b45ab40a127b62fbd902d, UNASSIGN because future has completed 2024-12-11T06:13:32,879 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:13:32,880 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 24bf8a85cf0b45ab40a127b62fbd902d, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:13:32,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-11T06:13:33,031 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:33,031 DEBUG 
[RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:13:33,031 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 209a9442d0247b7a2fff3efae53ff5eb, disabling compactions & flushes 2024-12-11T06:13:33,031 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:33,031 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:33,031 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. after waiting 0 ms 2024-12-11T06:13:33,031 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:33,035 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:33,036 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:13:33,036 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing 24bf8a85cf0b45ab40a127b62fbd902d, disabling compactions & flushes 2024-12-11T06:13:33,036 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 2024-12-11T06:13:33,036 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 2024-12-11T06:13:33,036 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. after waiting 0 ms 2024-12-11T06:13:33,036 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 
2024-12-11T06:13:33,052 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T06:13:33,053 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:13:33,053 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb. 2024-12-11T06:13:33,053 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 209a9442d0247b7a2fff3efae53ff5eb: Waiting for close lock at 1733897613031Running coprocessor pre-close hooks at 1733897613031Disabling compacts and flushes for region at 1733897613031Disabling writes for close at 1733897613031Writing region close event to WAL at 1733897613041 (+10 ms)Running coprocessor post-close hooks at 1733897613053 (+12 ms)Closed at 1733897613053 2024-12-11T06:13:33,061 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:33,062 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=209a9442d0247b7a2fff3efae53ff5eb, regionState=CLOSED 2024-12-11T06:13:33,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 209a9442d0247b7a2fff3efae53ff5eb, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:13:33,066 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T06:13:33,067 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:13:33,067 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d. 
2024-12-11T06:13:33,067 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for 24bf8a85cf0b45ab40a127b62fbd902d: Waiting for close lock at 1733897613036Running coprocessor pre-close hooks at 1733897613036Disabling compacts and flushes for region at 1733897613036Disabling writes for close at 1733897613036Writing region close event to WAL at 1733897613048 (+12 ms)Running coprocessor post-close hooks at 1733897613067 (+19 ms)Closed at 1733897613067 2024-12-11T06:13:33,077 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-12-11T06:13:33,077 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 209a9442d0247b7a2fff3efae53ff5eb, server=b66397bb9319,44539,1733897428597 in 191 msec 2024-12-11T06:13:33,078 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=24bf8a85cf0b45ab40a127b62fbd902d, regionState=CLOSED 2024-12-11T06:13:33,082 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed 24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:33,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 24bf8a85cf0b45ab40a127b62fbd902d, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:13:33,083 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=209a9442d0247b7a2fff3efae53ff5eb, UNASSIGN in 210 msec 2024-12-11T06:13:33,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-12-11T06:13:33,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure 24bf8a85cf0b45ab40a127b62fbd902d, server=b66397bb9319,35939,1733897428441 in 204 msec 2024-12-11T06:13:33,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=83 2024-12-11T06:13:33,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=24bf8a85cf0b45ab40a127b62fbd902d, UNASSIGN in 228 msec 2024-12-11T06:13:33,102 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-11T06:13:33,102 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 235 msec 2024-12-11T06:13:33,104 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897613104"}]},"ts":"1733897613104"} 2024-12-11T06:13:33,109 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-11T06:13:33,109 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-11T06:13:33,112 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 261 msec 2024-12-11T06:13:33,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-11T06:13:33,177 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-11T06:13:33,178 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-11T06:13:33,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T06:13:33,181 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T06:13:33,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-11T06:13:33,183 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T06:13:33,194 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-11T06:13:33,198 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:33,198 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:33,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T06:13:33,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T06:13:33,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T06:13:33,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T06:13:33,200 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-11T06:13:33,200 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-11T06:13:33,200 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-11T06:13:33,200 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-11T06:13:33,202 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/recovered.edits] 2024-12-11T06:13:33,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T06:13:33,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T06:13:33,202 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/recovered.edits] 2024-12-11T06:13:33,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T06:13:33,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-11T06:13:33,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:33,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:33,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:33,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:33,203 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:33,203 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:33,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-11T06:13:33,204 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:33,204 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:33,209 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/cf/93d6ba78b5c04290a28a34fb6563e4f4 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/cf/93d6ba78b5c04290a28a34fb6563e4f4 2024-12-11T06:13:33,210 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/cf/63c180d6e96e4780965dcd34297b299b to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/cf/63c180d6e96e4780965dcd34297b299b 2024-12-11T06:13:33,214 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/recovered.edits/8.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb/recovered.edits/8.seqid 2024-12-11T06:13:33,215 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:33,216 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/recovered.edits/8.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d/recovered.edits/8.seqid 2024-12-11T06:13:33,216 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportWithResetTtl/24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:33,216 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-11T06:13:33,217 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-11T06:13:33,218 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-12-11T06:13:33,222 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e202412112352a3d1180946df982f16bfc6857614_209a9442d0247b7a2fff3efae53ff5eb to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e202412112352a3d1180946df982f16bfc6857614_209a9442d0247b7a2fff3efae53ff5eb 2024-12-11T06:13:33,223 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241211f41d4bee9da44f3f829cebc235dfb362_24bf8a85cf0b45ab40a127b62fbd902d to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241211f41d4bee9da44f3f829cebc235dfb362_24bf8a85cf0b45ab40a127b62fbd902d 2024-12-11T06:13:33,223 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-11T06:13:33,226 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T06:13:33,229 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-11T06:13:33,233 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testExportWithResetTtl' descriptor. 2024-12-11T06:13:33,234 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T06:13:33,234 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testExportWithResetTtl' from region states. 
2024-12-11T06:13:33,234 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897613234"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:33,234 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897613234"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:33,237 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T06:13:33,237 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 209a9442d0247b7a2fff3efae53ff5eb, NAME => 'testExportWithResetTtl,,1733897589747.209a9442d0247b7a2fff3efae53ff5eb.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 24bf8a85cf0b45ab40a127b62fbd902d, NAME => 'testExportWithResetTtl,1,1733897589747.24bf8a85cf0b45ab40a127b62fbd902d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T06:13:33,237 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testExportWithResetTtl' as deleted. 2024-12-11T06:13:33,237 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897613237"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:33,239 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-11T06:13:33,240 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-11T06:13:33,241 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 62 msec 2024-12-11T06:13:33,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-11T06:13:33,307 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-11T06:13:33,307 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-11T06:13:33,308 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-11T06:13:33,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T06:13:33,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-11T06:13:33,314 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897613313"}]},"ts":"1733897613313"} 2024-12-11T06:13:33,317 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 
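The procedure chains logged above (DisableTableProcedure with its CloseTableRegionsProcedure/TransitRegionStateProcedure children, then DeleteTableProcedure archiving HFiles and clearing hbase:meta, followed by the same disable sequence for testtb-testExportWithResetTtl) are what the master executes when a client asks for disable and delete. A hedged client-side view of that cleanup is sketched below; connection details are assumed and not taken from the test.

```java
// Hedged sketch of the client calls whose server-side procedures appear in
// the log above: disable triggers DisableTableProcedure, delete triggers
// DeleteTableProcedure. Connection/configuration setup is assumed.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTablesExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      for (String name : new String[] {"testExportWithResetTtl", "testtb-testExportWithResetTtl"}) {
        TableName table = TableName.valueOf(name);
        if (admin.tableExists(table)) {
          admin.disableTable(table);  // DISABLE -> DisableTableProcedure on the master
          admin.deleteTable(table);   // DELETE  -> DeleteTableProcedure on the master
        }
      }
    }
  }
}
```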
2024-12-11T06:13:33,317 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-11T06:13:33,318 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-11T06:13:33,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=35698d8c3901cf8ffdf9dd340712898c, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=57e715a65e88185b1975130a77dde34b, UNASSIGN}] 2024-12-11T06:13:33,321 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=57e715a65e88185b1975130a77dde34b, UNASSIGN 2024-12-11T06:13:33,321 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=35698d8c3901cf8ffdf9dd340712898c, UNASSIGN 2024-12-11T06:13:33,322 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=35698d8c3901cf8ffdf9dd340712898c, regionState=CLOSING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:13:33,322 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=57e715a65e88185b1975130a77dde34b, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:13:33,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=35698d8c3901cf8ffdf9dd340712898c, UNASSIGN because future has completed 2024-12-11T06:13:33,326 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:13:33,326 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 35698d8c3901cf8ffdf9dd340712898c, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:13:33,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=57e715a65e88185b1975130a77dde34b, UNASSIGN because future has completed 2024-12-11T06:13:33,328 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:13:33,328 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 57e715a65e88185b1975130a77dde34b, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:13:33,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-11T06:13:33,480 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:33,480 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:13:33,481 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 35698d8c3901cf8ffdf9dd340712898c, disabling compactions & flushes 2024-12-11T06:13:33,481 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:33,481 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:33,481 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. after waiting 0 ms 2024-12-11T06:13:33,481 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:33,484 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:33,484 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:13:33,484 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing 57e715a65e88185b1975130a77dde34b, disabling compactions & flushes 2024-12-11T06:13:33,484 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 2024-12-11T06:13:33,484 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 2024-12-11T06:13:33,484 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. after waiting 0 ms 2024-12-11T06:13:33,484 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 
2024-12-11T06:13:33,505 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:13:33,506 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:13:33,506 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:13:33,506 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c. 2024-12-11T06:13:33,506 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 35698d8c3901cf8ffdf9dd340712898c: Waiting for close lock at 1733897613480Running coprocessor pre-close hooks at 1733897613480Disabling compacts and flushes for region at 1733897613480Disabling writes for close at 1733897613481 (+1 ms)Writing region close event to WAL at 1733897613482 (+1 ms)Running coprocessor post-close hooks at 1733897613506 (+24 ms)Closed at 1733897613506 2024-12-11T06:13:33,507 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:13:33,507 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b. 
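On close, each regionserver writes a `recovered.edits/<seqid>.seqid` marker for the region, and the earlier deletes moved store files under `.../archive/...` rather than removing them. A sketch of inspecting such locations with the plain Hadoop FileSystem API; the helper and its arguments are illustrative, with the directory meant to be copied from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: list a region or archive directory taken from the log, e.g.
// hdfs://localhost:42173/.../archive/data/default/testtb-testExportWithResetTtl/<encoded>/cf
public class ListHdfsDirSketch {
  public static void list(Configuration conf, String dir) throws Exception {
    Path path = new Path(dir);
    FileSystem fs = path.getFileSystem(conf);   // resolves the hdfs:// filesystem from the URI
    for (FileStatus st : fs.listStatus(path)) {
      System.out.println(st.getPath() + "  " + st.getLen() + " bytes");
    }
  }
}
```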
2024-12-11T06:13:33,507 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for 57e715a65e88185b1975130a77dde34b: Waiting for close lock at 1733897613484Running coprocessor pre-close hooks at 1733897613484Disabling compacts and flushes for region at 1733897613484Disabling writes for close at 1733897613484Writing region close event to WAL at 1733897613485 (+1 ms)Running coprocessor post-close hooks at 1733897613507 (+22 ms)Closed at 1733897613507 2024-12-11T06:13:33,509 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:33,509 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=35698d8c3901cf8ffdf9dd340712898c, regionState=CLOSED 2024-12-11T06:13:33,510 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed 57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:33,511 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=57e715a65e88185b1975130a77dde34b, regionState=CLOSED 2024-12-11T06:13:33,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 35698d8c3901cf8ffdf9dd340712898c, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:13:33,524 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=91 2024-12-11T06:13:33,524 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 35698d8c3901cf8ffdf9dd340712898c, server=b66397bb9319,44539,1733897428597 in 195 msec 2024-12-11T06:13:33,525 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=35698d8c3901cf8ffdf9dd340712898c, UNASSIGN in 204 msec 2024-12-11T06:13:33,526 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 57e715a65e88185b1975130a77dde34b, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:13:33,538 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=92 2024-12-11T06:13:33,538 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 57e715a65e88185b1975130a77dde34b, server=b66397bb9319,35939,1733897428441 in 199 msec 2024-12-11T06:13:33,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-12-11T06:13:33,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=57e715a65e88185b1975130a77dde34b, UNASSIGN in 218 msec 2024-12-11T06:13:33,548 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-11T06:13:33,548 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, 
ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 224 msec 2024-12-11T06:13:33,550 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897613550"}]},"ts":"1733897613550"} 2024-12-11T06:13:33,554 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-11T06:13:33,554 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-11T06:13:33,559 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 247 msec 2024-12-11T06:13:33,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-11T06:13:33,627 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T06:13:33,628 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-11T06:13:33,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T06:13:33,630 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T06:13:33,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-11T06:13:33,631 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T06:13:33,633 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-11T06:13:33,635 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:33,635 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:33,637 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/recovered.edits] 2024-12-11T06:13:33,638 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T06:13:33,638 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/recovered.edits] 2024-12-11T06:13:33,638 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T06:13:33,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T06:13:33,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T06:13:33,641 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-11T06:13:33,641 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-11T06:13:33,641 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-11T06:13:33,641 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-11T06:13:33,642 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T06:13:33,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:33,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T06:13:33,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:33,643 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T06:13:33,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:33,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-11T06:13:33,644 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:33,644 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/cf/7ec21f491b7b4ad7a6ba4c7ddcaf7f29 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/cf/7ec21f491b7b4ad7a6ba4c7ddcaf7f29 2024-12-11T06:13:33,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-11T06:13:33,646 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/cf/4982bac5bf0c4bddaa37f0818171ac75 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/cf/4982bac5bf0c4bddaa37f0818171ac75 2024-12-11T06:13:33,648 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c/recovered.edits/9.seqid 2024-12-11T06:13:33,649 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:33,649 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b/recovered.edits/9.seqid 2024-12-11T06:13:33,650 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithResetTtl/57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:33,650 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-11T06:13:33,651 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-11T06:13:33,651 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-12-11T06:13:33,657 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241211d1fc5d158b4f4f9881f72d667d4b3974_35698d8c3901cf8ffdf9dd340712898c to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241211d1fc5d158b4f4f9881f72d667d4b3974_35698d8c3901cf8ffdf9dd340712898c 2024-12-11T06:13:33,657 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241211173c744f4af143ee859a8072527f210f_57e715a65e88185b1975130a77dde34b to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241211173c744f4af143ee859a8072527f210f_57e715a65e88185b1975130a77dde34b 2024-12-11T06:13:33,658 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-11T06:13:33,660 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T06:13:33,663 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-11T06:13:33,666 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-11T06:13:33,667 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T06:13:33,667 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithResetTtl' from region states. 
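With the table gone, the test next deletes its snapshots (the `delete name: "..."` requests logged just below). A hedged sketch of the corresponding client calls, assuming an open Connection; the name filter is only an illustration of how this test's snapshots happen to be named:

```java
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;

// Sketch: drop the snapshots left behind by one test, mirroring the
// "delete name: ... type: DISABLED" requests in the log.
public class DeleteSnapshotsSketch {
  public static void cleanup(Connection conn, String suffix) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      for (SnapshotDescription snap : admin.listSnapshots()) {
        if (snap.getName().endsWith(suffix)) {   // e.g. suffix = "testExportWithResetTtl"
          admin.deleteSnapshot(snap.getName());
        }
      }
    }
  }
}
```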
2024-12-11T06:13:33,667 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897613667"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:33,667 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897613667"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:33,670 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T06:13:33,670 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 35698d8c3901cf8ffdf9dd340712898c, NAME => 'testtb-testExportWithResetTtl,,1733897588333.35698d8c3901cf8ffdf9dd340712898c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 57e715a65e88185b1975130a77dde34b, NAME => 'testtb-testExportWithResetTtl,1,1733897588333.57e715a65e88185b1975130a77dde34b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T06:13:33,670 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-11T06:13:33,671 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897613670"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:33,673 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-11T06:13:33,674 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-11T06:13:33,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 46 msec 2024-12-11T06:13:33,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-11T06:13:33,758 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-11T06:13:33,758 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-11T06:13:33,775 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-11T06:13:33,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-11T06:13:33,780 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-11T06:13:33,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-11T06:13:33,784 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: 
"snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-11T06:13:33,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-11T06:13:33,840 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=792 (was 789) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:53610 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3046 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:44136 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:55648 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 2162) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:43721 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1858177112_1 at /127.0.0.1:40634 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1858177112_1 at /127.0.0.1:49426 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43721 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=787 (was 785) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=752 (was 683) - SystemLoadAverage LEAK? -, ProcessCount=21 (was 20) - ProcessCount LEAK? 
-, AvailableMemoryMB=1973 (was 2212) 2024-12-11T06:13:33,840 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-12-11T06:13:33,880 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=792, OpenFileDescriptor=787, MaxFileDescriptor=1048576, SystemLoadAverage=752, ProcessCount=20, AvailableMemoryMB=1970 2024-12-11T06:13:33,880 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-12-11T06:13:33,882 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:13:33,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-11T06:13:33,885 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:13:33,885 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-11T06:13:33,886 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:13:33,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T06:13:33,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742037_1213 (size=443) 2024-12-11T06:13:33,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742037_1213 (size=443) 2024-12-11T06:13:33,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742037_1213 (size=443) 2024-12-11T06:13:33,944 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 90476011f31d4f2c39d6520668d6b5b8, NAME => 'testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:33,953 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ad1e0d487f27c94e5de4d9cd6038dcec, NAME => 'testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:33,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T06:13:34,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742038_1214 (size=68) 2024-12-11T06:13:34,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742038_1214 (size=68) 2024-12-11T06:13:34,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742038_1214 (size=68) 2024-12-11T06:13:34,003 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:34,003 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 90476011f31d4f2c39d6520668d6b5b8, disabling compactions & flushes 2024-12-11T06:13:34,003 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 2024-12-11T06:13:34,003 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 2024-12-11T06:13:34,003 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. after waiting 0 ms 2024-12-11T06:13:34,003 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 2024-12-11T06:13:34,003 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 
2024-12-11T06:13:34,003 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 90476011f31d4f2c39d6520668d6b5b8: Waiting for close lock at 1733897614003Disabling compacts and flushes for region at 1733897614003Disabling writes for close at 1733897614003Writing region close event to WAL at 1733897614003Closed at 1733897614003 2024-12-11T06:13:34,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742039_1215 (size=68) 2024-12-11T06:13:34,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742039_1215 (size=68) 2024-12-11T06:13:34,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742039_1215 (size=68) 2024-12-11T06:13:34,016 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:34,016 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing ad1e0d487f27c94e5de4d9cd6038dcec, disabling compactions & flushes 2024-12-11T06:13:34,016 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:34,016 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:34,017 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. after waiting 0 ms 2024-12-11T06:13:34,017 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:34,017 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 
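The create shown above builds a MOB-enabled table: `IS_MOB => 'true'` with `MOB_THRESHOLD => '0'` sends every cell of `cf` to MOB files, which is also why the earlier deletes had a `mobdir` region to archive. A sketch of an equivalent client-side create, matching the logged descriptor only on the MOB and version settings and using the same single split point:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: create a MOB table similar to the descriptor in the log, split at row key '1'.
public class CreateMobTableSketch {
  public static void create(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
      admin.createTable(
          TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)    // IS_MOB => 'true'
                  .setMobThreshold(0L)    // MOB_THRESHOLD => '0': every value is written as a MOB file
                  .setMaxVersions(1)      // VERSIONS => '1'
                  .build())
              .build(),
          new byte[][] { Bytes.toBytes("1") });   // regions ('', '1') and ('1', '')
    }
  }
}
```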
2024-12-11T06:13:34,017 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for ad1e0d487f27c94e5de4d9cd6038dcec: Waiting for close lock at 1733897614016Disabling compacts and flushes for region at 1733897614016Disabling writes for close at 1733897614017 (+1 ms)Writing region close event to WAL at 1733897614017Closed at 1733897614017 2024-12-11T06:13:34,020 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:13:34,020 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733897614020"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897614020"}]},"ts":"1733897614020"} 2024-12-11T06:13:34,020 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733897614020"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897614020"}]},"ts":"1733897614020"} 2024-12-11T06:13:34,028 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T06:13:34,029 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:13:34,029 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897614029"}]},"ts":"1733897614029"} 2024-12-11T06:13:34,032 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-11T06:13:34,033 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:13:34,034 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:13:34,034 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:13:34,034 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:13:34,034 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:13:34,034 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:13:34,034 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:13:34,034 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:13:34,034 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:13:34,034 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:13:34,034 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:13:34,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=90476011f31d4f2c39d6520668d6b5b8, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ad1e0d487f27c94e5de4d9cd6038dcec, ASSIGN}] 2024-12-11T06:13:34,036 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ad1e0d487f27c94e5de4d9cd6038dcec, ASSIGN 2024-12-11T06:13:34,036 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=90476011f31d4f2c39d6520668d6b5b8, ASSIGN 2024-12-11T06:13:34,037 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ad1e0d487f27c94e5de4d9cd6038dcec, ASSIGN; state=OFFLINE, location=b66397bb9319,44539,1733897428597; forceNewPlan=false, retain=false 2024-12-11T06:13:34,037 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=90476011f31d4f2c39d6520668d6b5b8, ASSIGN; state=OFFLINE, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:13:34,188 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
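The ASSIGN procedures above pick an OPENING location for each new region (one on the server at port 44539, one at 42579). A small sketch for checking where the regions actually landed once the opens complete, again assuming an open Connection:

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

// Sketch: report the server hosting each region of the newly created table.
public class RegionLocationsSketch {
  public static void print(Connection conn, String table) throws Exception {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf(table))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // e.g. 90476011f31d4f2c39d6520668d6b5b8 -> b66397bb9319,42579,1733897428663
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```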
2024-12-11T06:13:34,188 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=ad1e0d487f27c94e5de4d9cd6038dcec, regionState=OPENING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:13:34,188 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=90476011f31d4f2c39d6520668d6b5b8, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:13:34,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ad1e0d487f27c94e5de4d9cd6038dcec, ASSIGN because future has completed 2024-12-11T06:13:34,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:13:34,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=90476011f31d4f2c39d6520668d6b5b8, ASSIGN because future has completed 2024-12-11T06:13:34,193 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90476011f31d4f2c39d6520668d6b5b8, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:13:34,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T06:13:34,347 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:34,348 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => ad1e0d487f27c94e5de4d9cd6038dcec, NAME => 'testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:13:34,348 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. service=AccessControlService 2024-12-11T06:13:34,348 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:13:34,349 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,349 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:34,349 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,349 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,355 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 2024-12-11T06:13:34,355 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 90476011f31d4f2c39d6520668d6b5b8, NAME => 'testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:13:34,356 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. service=AccessControlService 2024-12-11T06:13:34,356 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:13:34,356 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,356 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:34,356 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,356 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,357 INFO [StoreOpener-ad1e0d487f27c94e5de4d9cd6038dcec-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,360 INFO [StoreOpener-ad1e0d487f27c94e5de4d9cd6038dcec-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ad1e0d487f27c94e5de4d9cd6038dcec columnFamilyName cf 2024-12-11T06:13:34,363 DEBUG [StoreOpener-ad1e0d487f27c94e5de4d9cd6038dcec-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:34,364 INFO [StoreOpener-ad1e0d487f27c94e5de4d9cd6038dcec-1 {}] regionserver.HStore(327): Store=ad1e0d487f27c94e5de4d9cd6038dcec/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:13:34,364 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,365 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,365 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,366 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,366 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,368 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,371 INFO [StoreOpener-90476011f31d4f2c39d6520668d6b5b8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,373 INFO [StoreOpener-90476011f31d4f2c39d6520668d6b5b8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 90476011f31d4f2c39d6520668d6b5b8 columnFamilyName cf 2024-12-11T06:13:34,374 DEBUG [StoreOpener-90476011f31d4f2c39d6520668d6b5b8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:34,374 INFO [StoreOpener-90476011f31d4f2c39d6520668d6b5b8-1 {}] regionserver.HStore(327): Store=90476011f31d4f2c39d6520668d6b5b8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:13:34,375 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,376 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,377 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,377 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:13:34,377 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,377 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,377 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened ad1e0d487f27c94e5de4d9cd6038dcec; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70925449, jitterRate=0.05687154829502106}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:13:34,377 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,378 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for ad1e0d487f27c94e5de4d9cd6038dcec: Running coprocessor pre-open hook at 1733897614349Writing region info on filesystem at 1733897614349Initializing all the Stores at 1733897614354 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897614354Cleaning up temporary data from old regions at 1733897614366 (+12 ms)Running coprocessor post-open hooks at 1733897614377 (+11 ms)Region opened successfully at 1733897614378 (+1 ms) 2024-12-11T06:13:34,380 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,388 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:13:34,388 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec., pid=99, masterSystemTime=1733897614343 2024-12-11T06:13:34,389 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 90476011f31d4f2c39d6520668d6b5b8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73922360, jitterRate=0.10152900218963623}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:13:34,389 DEBUG 
[RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,389 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 90476011f31d4f2c39d6520668d6b5b8: Running coprocessor pre-open hook at 1733897614356Writing region info on filesystem at 1733897614356Initializing all the Stores at 1733897614358 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897614358Cleaning up temporary data from old regions at 1733897614377 (+19 ms)Running coprocessor post-open hooks at 1733897614389 (+12 ms)Region opened successfully at 1733897614389 2024-12-11T06:13:34,390 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8., pid=100, masterSystemTime=1733897614346 2024-12-11T06:13:34,391 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:34,391 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:34,392 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=ad1e0d487f27c94e5de4d9cd6038dcec, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:13:34,395 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 2024-12-11T06:13:34,395 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 
2024-12-11T06:13:34,395 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=90476011f31d4f2c39d6520668d6b5b8, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:13:34,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:13:34,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90476011f31d4f2c39d6520668d6b5b8, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:13:34,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-12-11T06:13:34,404 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec, server=b66397bb9319,44539,1733897428597 in 209 msec 2024-12-11T06:13:34,406 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ad1e0d487f27c94e5de4d9cd6038dcec, ASSIGN in 369 msec 2024-12-11T06:13:34,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-12-11T06:13:34,406 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure 90476011f31d4f2c39d6520668d6b5b8, server=b66397bb9319,42579,1733897428663 in 210 msec 2024-12-11T06:13:34,409 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-12-11T06:13:34,410 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=90476011f31d4f2c39d6520668d6b5b8, ASSIGN in 371 msec 2024-12-11T06:13:34,410 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:13:34,411 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897614410"}]},"ts":"1733897614410"} 2024-12-11T06:13:34,413 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-11T06:13:34,414 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:13:34,414 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-11T06:13:34,418 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
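
A hedged aside, not from the log: in this run the "jenkins: RWXCA" row in hbase:acl is written by the AccessController's post-create hook (the CREATE_TABLE_POST_OPERATION step above), not by an explicit client call. An equivalent explicit grant from client code would look roughly like the sketch below, assuming an open `Connection` named `conn`.

// Illustrative sketch only: an explicit grant that would produce the same
// "jenkins: RWXCA" ACL entry the PermissionStorage lines above report.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

final class GrantSketch {
  static void grantOwnerPermissions(Connection conn) throws Throwable {
    AccessControlClient.grant(conn,
        TableName.valueOf("testtb-testExportFileSystemState"),
        "jenkins",
        null, null,                        // all column families / qualifiers
        Permission.Action.READ,            // R
        Permission.Action.WRITE,           // W
        Permission.Action.EXEC,            // X
        Permission.Action.CREATE,          // C
        Permission.Action.ADMIN);          // A
  }
}
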
2024-12-11T06:13:34,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:34,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:34,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:34,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:34,423 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:34,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:34,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:34,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:34,425 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 541 msec 2024-12-11T06:13:34,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-11T06:13:34,517 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T06:13:34,517 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:13:34,519 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-11T06:13:34,519 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 
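
The ClientMetaTableAccessor scans just above correspond to the test verifying that both regions of the new table are online ("Found 2 regions for table ..."). As a rough, non-authoritative client-side equivalent of that check (assuming an open `Connection` named `conn`):

// Illustrative sketch only: count the online regions of the new table via RegionLocator.
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class RegionCheckSketch {
  static int countRegions(Connection conn) throws Exception {
    try (RegionLocator locator =
        conn.getRegionLocator(TableName.valueOf("testtb-testExportFileSystemState"))) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      return locations.size();   // expected to be 2 for this pre-split table
    }
  }
}
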
2024-12-11T06:13:34,520 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:13:34,521 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:13:34,527 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:13:34,533 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:13:34,535 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T06:13:34,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897614535 (current time:1733897614535). 2024-12-11T06:13:34,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:13:34,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-11T06:13:34,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:13:34,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c8f3660, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:34,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:34,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:34,538 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:34,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:34,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:34,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e34cd6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-12-11T06:13:34,538 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:34,539 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:34,539 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:34,540 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54650, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:34,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fbc5fce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:34,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:34,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:34,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:34,543 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34080, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:34,544 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:13:34,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:13:34,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:34,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:34,544 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:13:34,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3eaf42, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:34,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:34,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:34,545 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:34,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:34,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:34,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38d01652, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:34,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:34,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:34,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:34,547 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54668, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:34,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42867cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:34,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:34,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:34,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:34,550 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34086, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:34,551 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:13:34,553 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:13:34,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:13:34,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:34,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:34,553 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:13:34,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T06:13:34,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
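
The snapshot request being validated above ({ ss=emptySnaptb0-testExportFileSystemState ... type=FLUSH ttl=0 }) is a single Admin#snapshot call on the client side. A minimal sketch, with the name and FLUSH type taken from the request and an assumed open `Connection` named `conn`:

// Illustrative sketch only: the client-side call behind the snapshot request above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

final class SnapshotSketch {
  static void takeEmptySnapshot(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // Blocks until the SnapshotProcedure (pid=101 in the entries below) completes.
      admin.snapshot("emptySnaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}
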
2024-12-11T06:13:34,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T06:13:34,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-11T06:13:34,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T06:13:34,557 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:13:34,559 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:13:34,561 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:13:34,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742040_1216 (size=170) 2024-12-11T06:13:34,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742040_1216 (size=170) 2024-12-11T06:13:34,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742040_1216 (size=170) 2024-12-11T06:13:34,585 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:13:34,586 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90476011f31d4f2c39d6520668d6b5b8}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec}] 2024-12-11T06:13:34,587 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:34,587 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:34,667 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T06:13:34,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-11T06:13:34,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-11T06:13:34,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:34,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 2024-12-11T06:13:34,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for ad1e0d487f27c94e5de4d9cd6038dcec: 2024-12-11T06:13:34,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. for emptySnaptb0-testExportFileSystemState completed. 2024-12-11T06:13:34,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for 90476011f31d4f2c39d6520668d6b5b8: 2024-12-11T06:13:34,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. for emptySnaptb0-testExportFileSystemState completed. 2024-12-11T06:13:34,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-11T06:13:34,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:34,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:13:34,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-11T06:13:34,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:34,740 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:13:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742041_1217 (size=71) 2024-12-11T06:13:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742042_1218 (size=71) 2024-12-11T06:13:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742041_1217 (size=71) 2024-12-11T06:13:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742041_1217 (size=71) 2024-12-11T06:13:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742042_1218 (size=71) 2024-12-11T06:13:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742042_1218 (size=71) 2024-12-11T06:13:34,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T06:13:35,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 2024-12-11T06:13:35,153 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-11T06:13:35,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-11T06:13:35,153 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:35,154 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:35,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 
2024-12-11T06:13:35,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-11T06:13:35,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-11T06:13:35,154 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:35,154 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:35,156 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 90476011f31d4f2c39d6520668d6b5b8 in 569 msec 2024-12-11T06:13:35,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=101 2024-12-11T06:13:35,157 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec in 570 msec 2024-12-11T06:13:35,157 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:13:35,157 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:13:35,158 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
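
Once the SnapshotProcedure finishes (the consolidation and completion entries just below), the result can be confirmed from the client, for example via Admin#listSnapshots. A rough sketch, again assuming an open `Connection` named `conn`:

// Illustrative sketch only: confirm a completed snapshot by name from the client.
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotDescription;

final class ListSnapshotsSketch {
  static boolean snapshotExists(Connection conn, String name) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      for (SnapshotDescription desc : admin.listSnapshots()) {
        if (name.equals(desc.getName())) {
          return true;   // e.g. "emptySnaptb0-testExportFileSystemState"
        }
      }
      return false;
    }
  }
}
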
2024-12-11T06:13:35,158 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:13:35,159 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:35,159 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T06:13:35,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742043_1219 (size=63) 2024-12-11T06:13:35,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742043_1219 (size=63) 2024-12-11T06:13:35,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742043_1219 (size=63) 2024-12-11T06:13:35,167 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:13:35,167 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-11T06:13:35,168 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-11T06:13:35,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742044_1220 (size=653) 2024-12-11T06:13:35,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742044_1220 (size=653) 2024-12-11T06:13:35,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742044_1220 (size=653) 2024-12-11T06:13:35,178 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:13:35,183 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:13:35,183 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-11T06:13:35,185 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:13:35,185 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-11T06:13:35,186 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 631 msec 2024-12-11T06:13:35,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-11T06:13:35,187 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T06:13:35,193 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:13:35,197 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44539 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:13:35,198 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:13:35,200 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-11T06:13:35,200 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 
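
The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are what the region server emits when a client writes with Durability.SKIP_WAL. A minimal sketch of such a write; the row key, qualifier, and value below are placeholders rather than the test's actual data, and `conn` is an assumed open `Connection`:

// Illustrative sketch only: a Put that skips the WAL, triggering the warning logged above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class SkipWalPutSketch {
  static void putWithoutWal(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("row-0"))    // placeholder row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);      // skip the WAL; data may be lost on crash
      table.put(put);
    }
  }
}
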
2024-12-11T06:13:35,201 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:13:35,202 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:13:35,207 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:13:35,213 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:13:35,215 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T06:13:35,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897615215 (current time:1733897615215). 2024-12-11T06:13:35,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:13:35,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-11T06:13:35,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:13:35,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@486851c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:35,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:35,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:35,217 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:35,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:35,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:35,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fc3ea5a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-11T06:13:35,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:35,217 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:35,218 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:35,218 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54692, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:35,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c7cfbeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:35,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:35,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:35,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:35,221 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34098, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:35,223 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:13:35,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:13:35,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:35,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:35,223 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:13:35,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b6229ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:35,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:35,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:35,224 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:35,224 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:35,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:35,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3efe0dd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:35,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:35,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:35,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:35,226 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54708, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:35,226 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@231ce844, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:35,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:35,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:35,227 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:35,228 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:35,230 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:13:35,231 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:13:35,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:13:35,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:35,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:35,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T06:13:35,231 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:13:35,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
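The master is handling a client request for a FLUSH-type snapshot named snaptb0-testExportFileSystemState. A minimal sketch of the corresponding Admin call, assuming the standard HBase client API (this is not the test's own code), is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A FLUSH snapshot flushes each region first, then records region-info and
          // hfile references, driving the SNAPSHOT_* procedure states visible in the log.
          admin.snapshot("snaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }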
2024-12-11T06:13:35,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T06:13:35,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-11T06:13:35,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-11T06:13:35,233 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:13:35,234 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:13:35,236 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:13:35,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742045_1221 (size=165) 2024-12-11T06:13:35,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742045_1221 (size=165) 2024-12-11T06:13:35,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742045_1221 (size=165) 2024-12-11T06:13:35,243 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:13:35,244 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90476011f31d4f2c39d6520668d6b5b8}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec}] 2024-12-11T06:13:35,244 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:35,245 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:35,336 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-11T06:13:35,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-11T06:13:35,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-11T06:13:35,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 2024-12-11T06:13:35,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:35,397 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing 90476011f31d4f2c39d6520668d6b5b8 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-11T06:13:35,397 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing ad1e0d487f27c94e5de4d9cd6038dcec 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-11T06:13:35,415 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211714308e2bf4a42068ab863ed590a7b43_90476011f31d4f2c39d6520668d6b5b8 is 71, key is 004f08401bf0d23360d7a57dba3733c6/cf:q/1733897615193/Put/seqid=0 2024-12-11T06:13:35,416 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412111af52c153a914fc88542cdccc5b5ed7c_ad1e0d487f27c94e5de4d9cd6038dcec is 71, key is 1151b02d8a04978c73e1fb3fdea29652/cf:q/1733897615196/Put/seqid=0 2024-12-11T06:13:35,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742046_1222 (size=5241) 2024-12-11T06:13:35,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742047_1223 (size=8031) 2024-12-11T06:13:35,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742046_1222 (size=5241) 2024-12-11T06:13:35,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:35,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:35,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742046_1222 (size=5241) 2024-12-11T06:13:35,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742047_1223 (size=8031) 2024-12-11T06:13:35,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742047_1223 (size=8031) 2024-12-11T06:13:35,433 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211714308e2bf4a42068ab863ed590a7b43_90476011f31d4f2c39d6520668d6b5b8 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241211714308e2bf4a42068ab863ed590a7b43_90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:35,434 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412111af52c153a914fc88542cdccc5b5ed7c_ad1e0d487f27c94e5de4d9cd6038dcec to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412111af52c153a914fc88542cdccc5b5ed7c_ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:35,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/.tmp/cf/0fb7651850ac475f994651a87e06e479, store: [table=testtb-testExportFileSystemState family=cf region=90476011f31d4f2c39d6520668d6b5b8] 2024-12-11T06:13:35,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/.tmp/cf/afa1949f7d764f9eb7f8e4f07518fc73, store: [table=testtb-testExportFileSystemState family=cf region=ad1e0d487f27c94e5de4d9cd6038dcec] 2024-12-11T06:13:35,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/.tmp/cf/0fb7651850ac475f994651a87e06e479 is 209, key is 034b6a747bdd14b34500665406da96cbe/cf:q/1733897615193/Put/seqid=0 2024-12-11T06:13:35,435 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/.tmp/cf/afa1949f7d764f9eb7f8e4f07518fc73 is 209, key is 1adaeb9c4e0543ce8a93ce50db4fe68e1/cf:q/1733897615196/Put/seqid=0 2024-12-11T06:13:35,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742048_1224 (size=6326) 2024-12-11T06:13:35,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742048_1224 (size=6326) 2024-12-11T06:13:35,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742049_1225 (size=14587) 2024-12-11T06:13:35,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742048_1224 (size=6326) 2024-12-11T06:13:35,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742049_1225 (size=14587) 2024-12-11T06:13:35,442 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/.tmp/cf/0fb7651850ac475f994651a87e06e479 2024-12-11T06:13:35,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742049_1225 (size=14587) 2024-12-11T06:13:35,442 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/.tmp/cf/afa1949f7d764f9eb7f8e4f07518fc73 2024-12-11T06:13:35,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/.tmp/cf/0fb7651850ac475f994651a87e06e479 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/cf/0fb7651850ac475f994651a87e06e479 2024-12-11T06:13:35,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/.tmp/cf/afa1949f7d764f9eb7f8e4f07518fc73 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/cf/afa1949f7d764f9eb7f8e4f07518fc73 2024-12-11T06:13:35,454 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/cf/0fb7651850ac475f994651a87e06e479, entries=5, sequenceid=6, filesize=6.2 K 2024-12-11T06:13:35,454 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/cf/afa1949f7d764f9eb7f8e4f07518fc73, entries=45, sequenceid=6, filesize=14.2 K 2024-12-11T06:13:35,455 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 90476011f31d4f2c39d6520668d6b5b8 in 58ms, sequenceid=6, compaction requested=false 2024-12-11T06:13:35,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-11T06:13:35,455 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for ad1e0d487f27c94e5de4d9cd6038dcec in 58ms, sequenceid=6, compaction requested=false 2024-12-11T06:13:35,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-11T06:13:35,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 90476011f31d4f2c39d6520668d6b5b8: 2024-12-11T06:13:35,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for ad1e0d487f27c94e5de4d9cd6038dcec: 2024-12-11T06:13:35,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. for snaptb0-testExportFileSystemState completed. 2024-12-11T06:13:35,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. for snaptb0-testExportFileSystemState completed. 2024-12-11T06:13:35,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-11T06:13:35,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-11T06:13:35,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:35,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:35,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/cf/0fb7651850ac475f994651a87e06e479] hfiles 2024-12-11T06:13:35,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/cf/0fb7651850ac475f994651a87e06e479 for snapshot=snaptb0-testExportFileSystemState 2024-12-11T06:13:35,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/cf/afa1949f7d764f9eb7f8e4f07518fc73] hfiles 2024-12-11T06:13:35,456 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/cf/afa1949f7d764f9eb7f8e4f07518fc73 for snapshot=snaptb0-testExportFileSystemState 2024-12-11T06:13:35,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742050_1226 (size=110) 2024-12-11T06:13:35,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742050_1226 (size=110) 2024-12-11T06:13:35,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742051_1227 (size=110) 2024-12-11T06:13:35,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742051_1227 (size=110) 2024-12-11T06:13:35,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 
2024-12-11T06:13:35,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742051_1227 (size=110) 2024-12-11T06:13:35,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742050_1226 (size=110) 2024-12-11T06:13:35,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-11T06:13:35,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:35,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-11T06:13:35,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-11T06:13:35,472 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:35,472 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:35,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-11T06:13:35,472 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:35,473 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:35,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 90476011f31d4f2c39d6520668d6b5b8 in 229 msec 2024-12-11T06:13:35,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=104 2024-12-11T06:13:35,476 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:13:35,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec in 229 msec 2024-12-11T06:13:35,476 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:13:35,477 DEBUG 
[MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-11T06:13:35,478 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:13:35,478 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:35,479 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412111af52c153a914fc88542cdccc5b5ed7c_ad1e0d487f27c94e5de4d9cd6038dcec, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241211714308e2bf4a42068ab863ed590a7b43_90476011f31d4f2c39d6520668d6b5b8] hfiles 2024-12-11T06:13:35,479 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412111af52c153a914fc88542cdccc5b5ed7c_ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:35,479 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241211714308e2bf4a42068ab863ed590a7b43_90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:35,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742052_1228 (size=294) 2024-12-11T06:13:35,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742052_1228 (size=294) 2024-12-11T06:13:35,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742052_1228 (size=294) 2024-12-11T06:13:35,489 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:13:35,489 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-11T06:13:35,490 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-11T06:13:35,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742053_1229 (size=963) 2024-12-11T06:13:35,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33445 is added to blk_1073742053_1229 (size=963) 2024-12-11T06:13:35,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742053_1229 (size=963) 2024-12-11T06:13:35,503 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:13:35,510 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:13:35,511 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-11T06:13:35,512 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:13:35,512 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-11T06:13:35,514 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 280 msec 2024-12-11T06:13:35,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-11T06:13:35,547 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T06:13:35,547 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897615547 2024-12-11T06:13:35,547 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:42173, tgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897615547, rawTgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897615547, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:35,576 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, 
inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:35,576 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897615547, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897615547/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-11T06:13:35,578 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T06:13:35,583 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897615547/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-11T06:13:35,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742054_1230 (size=165) 2024-12-11T06:13:35,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742054_1230 (size=165) 2024-12-11T06:13:35,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742054_1230 (size=165) 2024-12-11T06:13:35,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742055_1231 (size=963) 2024-12-11T06:13:35,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742055_1231 (size=963) 2024-12-11T06:13:35,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742055_1231 (size=963) 2024-12-11T06:13:35,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:35,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:35,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:35,945 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000005/launch_container.sh] 2024-12-11T06:13:35,945 WARN [ContainersLauncher #2 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000005/container_tokens] 2024-12-11T06:13:35,945 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000005/sysfs] 2024-12-11T06:13:36,613 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-14220620916990411816.jar 2024-12-11T06:13:36,614 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:36,626 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:36,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-7293827036991758046.jar 2024-12-11T06:13:36,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:36,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:36,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:36,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:36,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:36,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:36,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T06:13:36,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T06:13:36,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T06:13:36,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T06:13:36,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T06:13:36,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T06:13:36,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T06:13:36,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T06:13:36,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T06:13:36,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T06:13:36,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T06:13:36,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:13:36,698 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:13:36,698 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:13:36,698 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:13:36,698 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:13:36,698 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:13:36,699 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:13:36,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742056_1232 (size=131440) 2024-12-11T06:13:36,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742056_1232 (size=131440) 2024-12-11T06:13:36,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742056_1232 (size=131440) 2024-12-11T06:13:36,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742057_1233 (size=4188619) 2024-12-11T06:13:36,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742057_1233 (size=4188619) 2024-12-11T06:13:36,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742057_1233 (size=4188619) 2024-12-11T06:13:36,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742058_1234 (size=1323991) 
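The jar entries above come from TableMapReduceUtil staging dependencies for the ExportSnapshot MapReduce job. A sketch of driving the export programmatically follows; the destination path is an illustrative assumption, and the usual command-line equivalent is hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <hdfs-uri>.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ExportSnapshot is a Hadoop Tool; ToolRunner parses the same flags as the CLI.
        int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://namenode:8020/backups/export-test"  // illustrative destination
        });
        System.exit(exitCode);
      }
    }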
2024-12-11T06:13:36,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742058_1234 (size=1323991) 2024-12-11T06:13:36,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742058_1234 (size=1323991) 2024-12-11T06:13:36,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742059_1235 (size=903542) 2024-12-11T06:13:36,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742059_1235 (size=903542) 2024-12-11T06:13:36,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742059_1235 (size=903542) 2024-12-11T06:13:36,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742060_1236 (size=8360360) 2024-12-11T06:13:36,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742060_1236 (size=8360360) 2024-12-11T06:13:36,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742060_1236 (size=8360360) 2024-12-11T06:13:36,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742061_1237 (size=6426220) 2024-12-11T06:13:36,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742061_1237 (size=6426220) 2024-12-11T06:13:36,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742061_1237 (size=6426220) 2024-12-11T06:13:36,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742062_1238 (size=1877034) 2024-12-11T06:13:36,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742062_1238 (size=1877034) 2024-12-11T06:13:36,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742062_1238 (size=1877034) 2024-12-11T06:13:36,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742063_1239 (size=77835) 2024-12-11T06:13:36,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742063_1239 (size=77835) 2024-12-11T06:13:36,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742063_1239 (size=77835) 2024-12-11T06:13:36,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742064_1240 (size=30949) 2024-12-11T06:13:36,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742064_1240 (size=30949) 2024-12-11T06:13:36,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742064_1240 
(size=30949) 2024-12-11T06:13:36,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742065_1241 (size=1597213) 2024-12-11T06:13:36,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742065_1241 (size=1597213) 2024-12-11T06:13:36,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742065_1241 (size=1597213) 2024-12-11T06:13:36,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742066_1242 (size=443172) 2024-12-11T06:13:36,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742066_1242 (size=443172) 2024-12-11T06:13:36,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742066_1242 (size=443172) 2024-12-11T06:13:36,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742067_1243 (size=4695811) 2024-12-11T06:13:36,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742067_1243 (size=4695811) 2024-12-11T06:13:36,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742067_1243 (size=4695811) 2024-12-11T06:13:36,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742068_1244 (size=232957) 2024-12-11T06:13:36,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742068_1244 (size=232957) 2024-12-11T06:13:36,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742068_1244 (size=232957) 2024-12-11T06:13:37,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742069_1245 (size=127628) 2024-12-11T06:13:37,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742069_1245 (size=127628) 2024-12-11T06:13:37,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742069_1245 (size=127628) 2024-12-11T06:13:37,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742070_1246 (size=20406) 2024-12-11T06:13:37,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742070_1246 (size=20406) 2024-12-11T06:13:37,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742070_1246 (size=20406) 2024-12-11T06:13:37,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742071_1247 (size=5175431) 2024-12-11T06:13:37,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to 
blk_1073742071_1247 (size=5175431) 2024-12-11T06:13:37,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742071_1247 (size=5175431) 2024-12-11T06:13:37,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742072_1248 (size=217634) 2024-12-11T06:13:37,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742072_1248 (size=217634) 2024-12-11T06:13:37,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742072_1248 (size=217634) 2024-12-11T06:13:37,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742073_1249 (size=1832290) 2024-12-11T06:13:37,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742073_1249 (size=1832290) 2024-12-11T06:13:37,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742073_1249 (size=1832290) 2024-12-11T06:13:37,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742074_1250 (size=322274) 2024-12-11T06:13:37,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742074_1250 (size=322274) 2024-12-11T06:13:37,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742074_1250 (size=322274) 2024-12-11T06:13:37,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742075_1251 (size=503880) 2024-12-11T06:13:37,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742075_1251 (size=503880) 2024-12-11T06:13:37,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742075_1251 (size=503880) 2024-12-11T06:13:37,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742076_1252 (size=29229) 2024-12-11T06:13:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742076_1252 (size=29229) 2024-12-11T06:13:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742076_1252 (size=29229) 2024-12-11T06:13:37,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742077_1253 (size=24096) 2024-12-11T06:13:37,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742077_1253 (size=24096) 2024-12-11T06:13:37,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742077_1253 (size=24096) 2024-12-11T06:13:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is 
added to blk_1073742078_1254 (size=111872) 2024-12-11T06:13:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742078_1254 (size=111872) 2024-12-11T06:13:37,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742078_1254 (size=111872) 2024-12-11T06:13:37,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742079_1255 (size=45609) 2024-12-11T06:13:37,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742079_1255 (size=45609) 2024-12-11T06:13:37,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742079_1255 (size=45609) 2024-12-11T06:13:37,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742080_1256 (size=136454) 2024-12-11T06:13:37,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742080_1256 (size=136454) 2024-12-11T06:13:37,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742080_1256 (size=136454) 2024-12-11T06:13:37,131 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T06:13:37,134 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-11T06:13:37,136 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.2 K 2024-12-11T06:13:37,136 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-11T06:13:37,136 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.2 K 2024-12-11T06:13:37,136 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-11T06:13:37,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742081_1257 (size=1035) 2024-12-11T06:13:37,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742081_1257 (size=1035) 2024-12-11T06:13:37,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742081_1257 (size=1035) 2024-12-11T06:13:37,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742082_1258 (size=35) 2024-12-11T06:13:37,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742082_1258 (size=35) 2024-12-11T06:13:37,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742082_1258 (size=35) 2024-12-11T06:13:37,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742083_1259 (size=304004) 2024-12-11T06:13:37,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37875 is added to blk_1073742083_1259 (size=304004) 2024-12-11T06:13:37,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742083_1259 (size=304004) 2024-12-11T06:13:37,257 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:13:37,257 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:13:37,260 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0003_000001 (auth:SIMPLE) from 127.0.0.1:34708 2024-12-11T06:13:37,272 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000001/launch_container.sh] 2024-12-11T06:13:37,272 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000001/container_tokens] 2024-12-11T06:13:37,272 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0003/container_1733897436125_0003_01_000001/sysfs] 2024-12-11T06:13:38,005 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0004_000001 (auth:SIMPLE) from 127.0.0.1:34332 2024-12-11T06:13:38,586 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:13:38,615 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-11T06:13:38,615 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-11T06:13:38,616 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-11T06:13:38,616 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 
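The snapshot.ExportSnapshot(663)/(763) entries above show the export job loading the snapshot's hfile list and planning four input splits by size before the MapReduce job is submitted. A hedged sketch of how such an export is typically driven, assuming ExportSnapshot can be run as a Hadoop Tool with the documented -snapshot and -copy-to options (the destination URI below is a placeholder, not a path from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and the hfiles it references to the target
        // filesystem; the job groups the file list into size-balanced splits,
        // matching the "export split=N size=..." DEBUG lines above.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://localhost:42173/user/jenkins/export-test/placeholder" // placeholder target
        });
        System.exit(rc);
      }
    }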
2024-12-11T06:13:43,490 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0004_000001 (auth:SIMPLE) from 127.0.0.1:54190 2024-12-11T06:13:43,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742084_1260 (size=349702) 2024-12-11T06:13:43,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742084_1260 (size=349702) 2024-12-11T06:13:43,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742084_1260 (size=349702) 2024-12-11T06:13:44,118 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:13:45,793 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0004_000001 (auth:SIMPLE) from 127.0.0.1:43074 2024-12-11T06:13:45,793 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0004_000001 (auth:SIMPLE) from 127.0.0.1:43520 2024-12-11T06:13:46,617 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0004_000001 (auth:SIMPLE) from 127.0.0.1:43532 2024-12-11T06:13:46,622 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0004_000001 (auth:SIMPLE) from 127.0.0.1:43088 2024-12-11T06:13:49,261 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0004_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T06:13:52,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742085_1261 (size=14587) 2024-12-11T06:13:52,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742085_1261 (size=14587) 2024-12-11T06:13:52,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742085_1261 (size=14587) 2024-12-11T06:13:54,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742087_1263 (size=6326) 2024-12-11T06:13:54,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742087_1263 (size=6326) 2024-12-11T06:13:54,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742087_1263 (size=6326) 2024-12-11T06:13:54,396 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000004/launch_container.sh] 2024-12-11T06:13:54,396 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000004/container_tokens] 2024-12-11T06:13:54,396 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000004/sysfs] 2024-12-11T06:13:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742088_1264 (size=8031) 2024-12-11T06:13:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742088_1264 (size=8031) 2024-12-11T06:13:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742088_1264 (size=8031) 2024-12-11T06:13:54,677 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000003/launch_container.sh] 2024-12-11T06:13:54,677 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000003/container_tokens] 2024-12-11T06:13:54,677 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000003/sysfs] 2024-12-11T06:13:54,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742089_1265 (size=5241) 2024-12-11T06:13:54,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742089_1265 (size=5241) 2024-12-11T06:13:54,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742089_1265 (size=5241) 2024-12-11T06:13:54,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742086_1262 (size=31749) 2024-12-11T06:13:54,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742086_1262 (size=31749) 2024-12-11T06:13:54,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742086_1262 (size=31749) 
2024-12-11T06:13:54,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742090_1266 (size=466) 2024-12-11T06:13:54,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742090_1266 (size=466) 2024-12-11T06:13:54,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742090_1266 (size=466) 2024-12-11T06:13:54,830 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000005/launch_container.sh] 2024-12-11T06:13:54,830 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000005/container_tokens] 2024-12-11T06:13:54,830 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000005/sysfs] 2024-12-11T06:13:55,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742091_1267 (size=31749) 2024-12-11T06:13:55,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742091_1267 (size=31749) 2024-12-11T06:13:55,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742091_1267 (size=31749) 2024-12-11T06:13:55,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742092_1268 (size=349702) 2024-12-11T06:13:55,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742092_1268 (size=349702) 2024-12-11T06:13:55,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742092_1268 (size=349702) 2024-12-11T06:13:55,361 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0004_000001 (auth:SIMPLE) from 127.0.0.1:50980 2024-12-11T06:13:55,368 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0004_000001 (auth:SIMPLE) from 127.0.0.1:50996 2024-12-11T06:13:56,296 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T06:13:57,398 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T06:13:57,399 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T06:13:57,413 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-11T06:13:57,413 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T06:13:57,414 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T06:13:57,414 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-11T06:13:57,415 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-11T06:13:57,415 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-11T06:13:57,415 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897615547/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897615547/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-11T06:13:57,415 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897615547/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-11T06:13:57,415 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897615547/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-11T06:13:57,441 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-11T06:13:57,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-11T06:13:57,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-11T06:13:57,445 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897637445"}]},"ts":"1733897637445"} 2024-12-11T06:13:57,447 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): 
Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-11T06:13:57,447 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-11T06:13:57,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-11T06:13:57,450 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=90476011f31d4f2c39d6520668d6b5b8, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ad1e0d487f27c94e5de4d9cd6038dcec, UNASSIGN}] 2024-12-11T06:13:57,451 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ad1e0d487f27c94e5de4d9cd6038dcec, UNASSIGN 2024-12-11T06:13:57,451 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=90476011f31d4f2c39d6520668d6b5b8, UNASSIGN 2024-12-11T06:13:57,452 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=ad1e0d487f27c94e5de4d9cd6038dcec, regionState=CLOSING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:13:57,452 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=90476011f31d4f2c39d6520668d6b5b8, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:13:57,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ad1e0d487f27c94e5de4d9cd6038dcec, UNASSIGN because future has completed 2024-12-11T06:13:57,454 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:13:57,454 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:13:57,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=90476011f31d4f2c39d6520668d6b5b8, UNASSIGN because future has completed 2024-12-11T06:13:57,455 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:13:57,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 90476011f31d4f2c39d6520668d6b5b8, 
server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:13:57,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-11T06:13:57,607 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:57,607 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:13:57,608 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing ad1e0d487f27c94e5de4d9cd6038dcec, disabling compactions & flushes 2024-12-11T06:13:57,608 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:57,608 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:57,608 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:13:57,608 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:57,608 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. after waiting 0 ms 2024-12-11T06:13:57,608 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing 90476011f31d4f2c39d6520668d6b5b8, disabling compactions & flushes 2024-12-11T06:13:57,608 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 2024-12-11T06:13:57,608 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 2024-12-11T06:13:57,608 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 2024-12-11T06:13:57,608 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. after waiting 0 ms 2024-12-11T06:13:57,608 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 
2024-12-11T06:13:57,620 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:13:57,621 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:13:57,621 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8. 2024-12-11T06:13:57,621 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for 90476011f31d4f2c39d6520668d6b5b8: Waiting for close lock at 1733897637608Running coprocessor pre-close hooks at 1733897637608Disabling compacts and flushes for region at 1733897637608Disabling writes for close at 1733897637608Writing region close event to WAL at 1733897637610 (+2 ms)Running coprocessor post-close hooks at 1733897637621 (+11 ms)Closed at 1733897637621 2024-12-11T06:13:57,624 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed 90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:57,624 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=90476011f31d4f2c39d6520668d6b5b8, regionState=CLOSED 2024-12-11T06:13:57,627 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 90476011f31d4f2c39d6520668d6b5b8, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:13:57,629 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:13:57,630 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=109 2024-12-11T06:13:57,630 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure 90476011f31d4f2c39d6520668d6b5b8, server=b66397bb9319,42579,1733897428663 in 173 msec 2024-12-11T06:13:57,632 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=90476011f31d4f2c39d6520668d6b5b8, UNASSIGN in 180 msec 2024-12-11T06:13:57,632 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:13:57,632 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec. 
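The DisableTableProcedure, CloseTableRegionsProcedure and CloseRegionProcedure chain above (regions moved CLOSING -> CLOSED, table state DISABLING -> DISABLED in hbase:meta) is the server side of a single client request to disable the table before dropping it. A minimal client-side sketch, with the connection setup assumed rather than shown in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Drives the DisableTableProcedure: the master unassigns and closes the
          // table's regions, then marks the table DISABLED in hbase:meta.
          admin.disableTable(TableName.valueOf("testtb-testExportFileSystemState"));
        }
      }
    }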
2024-12-11T06:13:57,632 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for ad1e0d487f27c94e5de4d9cd6038dcec: Waiting for close lock at 1733897637607Running coprocessor pre-close hooks at 1733897637607Disabling compacts and flushes for region at 1733897637607Disabling writes for close at 1733897637608 (+1 ms)Writing region close event to WAL at 1733897637609 (+1 ms)Running coprocessor post-close hooks at 1733897637632 (+23 ms)Closed at 1733897637632 2024-12-11T06:13:57,634 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:57,634 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=ad1e0d487f27c94e5de4d9cd6038dcec, regionState=CLOSED 2024-12-11T06:13:57,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:13:57,641 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-12-11T06:13:57,641 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure ad1e0d487f27c94e5de4d9cd6038dcec, server=b66397bb9319,44539,1733897428597 in 186 msec 2024-12-11T06:13:57,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=110, resume processing ppid=108 2024-12-11T06:13:57,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ad1e0d487f27c94e5de4d9cd6038dcec, UNASSIGN in 191 msec 2024-12-11T06:13:57,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-11T06:13:57,645 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 196 msec 2024-12-11T06:13:57,647 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897637646"}]},"ts":"1733897637646"} 2024-12-11T06:13:57,649 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-11T06:13:57,649 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-11T06:13:57,652 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 209 msec 2024-12-11T06:13:57,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-11T06:13:57,757 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T06:13:57,758 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-11T06:13:57,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T06:13:57,760 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T06:13:57,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-11T06:13:57,761 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T06:13:57,767 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-11T06:13:57,769 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:57,770 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:57,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T06:13:57,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T06:13:57,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T06:13:57,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T06:13:57,775 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/recovered.edits] 2024-12-11T06:13:57,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemState with data PBUF 2024-12-11T06:13:57,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-11T06:13:57,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-11T06:13:57,775 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-11T06:13:57,775 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/recovered.edits] 2024-12-11T06:13:57,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T06:13:57,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T06:13:57,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:57,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:57,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T06:13:57,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:57,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-11T06:13:57,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:57,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-11T06:13:57,788 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/cf/0fb7651850ac475f994651a87e06e479 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/cf/0fb7651850ac475f994651a87e06e479 2024-12-11T06:13:57,789 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/cf/afa1949f7d764f9eb7f8e4f07518fc73 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/cf/afa1949f7d764f9eb7f8e4f07518fc73 2024-12-11T06:13:57,797 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8/recovered.edits/9.seqid 2024-12-11T06:13:57,798 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:57,798 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec/recovered.edits/9.seqid 2024-12-11T06:13:57,799 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemState/ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:57,799 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-11T06:13:57,800 DEBUG [PEWorker-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-11T06:13:57,801 DEBUG [PEWorker-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-12-11T06:13:57,806 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241211714308e2bf4a42068ab863ed590a7b43_90476011f31d4f2c39d6520668d6b5b8 to 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e20241211714308e2bf4a42068ab863ed590a7b43_90476011f31d4f2c39d6520668d6b5b8 2024-12-11T06:13:57,806 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412111af52c153a914fc88542cdccc5b5ed7c_ad1e0d487f27c94e5de4d9cd6038dcec to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412111af52c153a914fc88542cdccc5b5ed7c_ad1e0d487f27c94e5de4d9cd6038dcec 2024-12-11T06:13:57,807 DEBUG [PEWorker-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-11T06:13:57,809 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T06:13:57,812 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-11T06:13:57,816 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-11T06:13:57,817 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T06:13:57,817 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemState' from region states. 2024-12-11T06:13:57,818 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897637817"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:57,818 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897637817"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:57,822 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T06:13:57,822 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 90476011f31d4f2c39d6520668d6b5b8, NAME => 'testtb-testExportFileSystemState,,1733897613882.90476011f31d4f2c39d6520668d6b5b8.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ad1e0d487f27c94e5de4d9cd6038dcec, NAME => 'testtb-testExportFileSystemState,1,1733897613882.ad1e0d487f27c94e5de4d9cd6038dcec.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T06:13:57,822 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemState' as deleted. 
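The DeleteTableProcedure entries above (HFileArchiver moving region and mob store files into the archive, region rows removed from hbase:meta, the table descriptor and ACL znode cleaned up) are the server side of a client deleteTable call, and the "delete name: ..." master requests that follow correspond to deleteSnapshot. A hedged client-side sketch of that cleanup sequence, again with the connection setup as an assumption rather than something captured in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemState");
          // Requires the table to be disabled (done above). Triggers the
          // DeleteTableProcedure: store files are archived, region entries and the
          // table state row are removed from hbase:meta.
          admin.deleteTable(table);

          // Matches the master "delete name: ..." snapshot requests logged next.
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testExportFileSystemState");
        }
      }
    }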
2024-12-11T06:13:57,822 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897637822"}]},"ts":"9223372036854775807"} 2024-12-11T06:13:57,825 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-11T06:13:57,828 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-11T06:13:57,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 70 msec 2024-12-11T06:13:57,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-11T06:13:57,888 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-11T06:13:57,888 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-11T06:13:57,902 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-11T06:13:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-11T06:13:57,908 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-11T06:13:57,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-11T06:13:57,957 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=798 (was 792) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:60686 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1195192572_1 at /127.0.0.1:60668 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3895 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35883 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1195192572_1 at /127.0.0.1:45082 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:35883 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:43710 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 5723) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:45118 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=783 (was 787), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=818 (was 752) - SystemLoadAverage LEAK? -, ProcessCount=23 (was 20) - ProcessCount LEAK? -, AvailableMemoryMB=1652 (was 1970) 2024-12-11T06:13:57,957 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-11T06:13:57,978 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=798, OpenFileDescriptor=783, MaxFileDescriptor=1048576, SystemLoadAverage=818, ProcessCount=24, AvailableMemoryMB=1649 2024-12-11T06:13:57,978 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-11T06:13:57,980 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:13:57,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-11T06:13:57,983 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:13:57,983 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-11T06:13:57,985 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:13:57,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T06:13:57,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742093_1269 (size=440) 2024-12-11T06:13:57,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742093_1269 (size=440) 2024-12-11T06:13:57,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742093_1269 (size=440) 2024-12-11T06:13:58,000 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3ca44cdf05d63e72dd38ad533ec63381, NAME => 
'testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:58,001 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0d420ffea61e34799c9676e7af0f3a9e, NAME => 'testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:58,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-11T06:13:58,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742094_1270 (size=65) 2024-12-11T06:13:58,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742095_1271 (size=65) 2024-12-11T06:13:58,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742094_1270 (size=65) 2024-12-11T06:13:58,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742095_1271 (size=65) 2024-12-11T06:13:58,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742095_1271 (size=65) 2024-12-11T06:13:58,074 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:58,074 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 0d420ffea61e34799c9676e7af0f3a9e, disabling compactions & flushes 2024-12-11T06:13:58,074 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 
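[Editor's note, not part of the log] The table descriptor being created above (a MOB-enabled 'cf' family with MOB_THRESHOLD => '0', VERSIONS => '1', and a single split at '1') can be reproduced with the standard client builder API. This is an illustrative sketch under the assumption of a reachable cluster, not the test's actual source; the class name CreateConsecutiveExportsTable is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateConsecutiveExportsTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell goes to a MOB file
              .setMaxVersions(1)     // VERSIONS => '1'
              .build();
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
              .setColumnFamily(cf)
              .build();
          // One split key ('1') yields the two regions seen in the log: [,1) and [1,).
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }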
2024-12-11T06:13:58,074 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:13:58,074 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. after waiting 0 ms 2024-12-11T06:13:58,074 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:13:58,074 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:13:58,075 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0d420ffea61e34799c9676e7af0f3a9e: Waiting for close lock at 1733897638074Disabling compacts and flushes for region at 1733897638074Disabling writes for close at 1733897638074Writing region close event to WAL at 1733897638074Closed at 1733897638074 2024-12-11T06:13:58,076 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:58,076 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 3ca44cdf05d63e72dd38ad533ec63381, disabling compactions & flushes 2024-12-11T06:13:58,076 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:13:58,076 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:13:58,076 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. after waiting 0 ms 2024-12-11T06:13:58,076 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:13:58,076 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 
2024-12-11T06:13:58,076 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3ca44cdf05d63e72dd38ad533ec63381: Waiting for close lock at 1733897638076Disabling compacts and flushes for region at 1733897638076Disabling writes for close at 1733897638076Writing region close event to WAL at 1733897638076Closed at 1733897638076 2024-12-11T06:13:58,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742094_1270 (size=65) 2024-12-11T06:13:58,078 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:13:58,078 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733897638078"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897638078"}]},"ts":"1733897638078"} 2024-12-11T06:13:58,078 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733897638078"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897638078"}]},"ts":"1733897638078"} 2024-12-11T06:13:58,087 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T06:13:58,088 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:13:58,088 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897638088"}]},"ts":"1733897638088"} 2024-12-11T06:13:58,090 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-11T06:13:58,091 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:13:58,092 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:13:58,092 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:13:58,092 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:13:58,092 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:13:58,092 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:13:58,092 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:13:58,093 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:13:58,093 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:13:58,093 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:13:58,093 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:13:58,093 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0d420ffea61e34799c9676e7af0f3a9e, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3ca44cdf05d63e72dd38ad533ec63381, ASSIGN}] 2024-12-11T06:13:58,095 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0d420ffea61e34799c9676e7af0f3a9e, ASSIGN 2024-12-11T06:13:58,096 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3ca44cdf05d63e72dd38ad533ec63381, ASSIGN 2024-12-11T06:13:58,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T06:13:58,097 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0d420ffea61e34799c9676e7af0f3a9e, ASSIGN; state=OFFLINE, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:13:58,098 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3ca44cdf05d63e72dd38ad533ec63381, ASSIGN; state=OFFLINE, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:13:58,122 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000002/launch_container.sh] 2024-12-11T06:13:58,122 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000002/container_tokens] 2024-12-11T06:13:58,122 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000002/sysfs] 2024-12-11T06:13:58,248 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
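[Editor's note, not part of the log] The recurring "Checking to see if procedure is done pid=114" records are the client polling the master while the ASSIGN subprocedures above execute. A hedged application-level equivalent of that wait (helper and class names are hypothetical; the Admin handle is assumed to come from Connection.getAdmin() as in the earlier sketches) could look like:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class WaitForTable {
      // Blocks until every region of the table is assigned and open, or the timeout elapses.
      static void waitUntilAvailable(Admin admin, String table, long timeoutMs) throws Exception {
        TableName tn = TableName.valueOf(table);
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!admin.isTableAvailable(tn)) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException("regions of " + table + " not assigned within " + timeoutMs + " ms");
          }
          Thread.sleep(200);
        }
      }
    }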
2024-12-11T06:13:58,249 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=3ca44cdf05d63e72dd38ad533ec63381, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:13:58,250 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=0d420ffea61e34799c9676e7af0f3a9e, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:13:58,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3ca44cdf05d63e72dd38ad533ec63381, ASSIGN because future has completed 2024-12-11T06:13:58,254 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:13:58,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0d420ffea61e34799c9676e7af0f3a9e, ASSIGN because future has completed 2024-12-11T06:13:58,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:13:58,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T06:13:58,412 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:13:58,412 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 3ca44cdf05d63e72dd38ad533ec63381, NAME => 'testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:13:58,412 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. service=AccessControlService 2024-12-11T06:13:58,412 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:13:58,413 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 0d420ffea61e34799c9676e7af0f3a9e, NAME => 'testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:13:58,413 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 
service=AccessControlService 2024-12-11T06:13:58,413 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:13:58,413 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:13:58,413 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,413 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,413 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:58,413 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:13:58,413 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,413 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,413 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,413 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,415 INFO [StoreOpener-3ca44cdf05d63e72dd38ad533ec63381-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,416 INFO [StoreOpener-0d420ffea61e34799c9676e7af0f3a9e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,417 INFO [StoreOpener-3ca44cdf05d63e72dd38ad533ec63381-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3ca44cdf05d63e72dd38ad533ec63381 columnFamilyName cf 2024-12-11T06:13:58,417 INFO [StoreOpener-0d420ffea61e34799c9676e7af0f3a9e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d420ffea61e34799c9676e7af0f3a9e columnFamilyName cf 2024-12-11T06:13:58,418 DEBUG [StoreOpener-0d420ffea61e34799c9676e7af0f3a9e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:58,419 INFO [StoreOpener-0d420ffea61e34799c9676e7af0f3a9e-1 {}] regionserver.HStore(327): Store=0d420ffea61e34799c9676e7af0f3a9e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:13:58,419 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,420 DEBUG [StoreOpener-3ca44cdf05d63e72dd38ad533ec63381-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:58,420 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,421 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,428 INFO [StoreOpener-3ca44cdf05d63e72dd38ad533ec63381-1 {}] regionserver.HStore(327): Store=3ca44cdf05d63e72dd38ad533ec63381/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:13:58,428 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,428 DEBUG 
[RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,428 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,436 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,436 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,437 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,437 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,437 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,439 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,440 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:13:58,452 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 0d420ffea61e34799c9676e7af0f3a9e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65384371, jitterRate=-0.025696948170661926}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:13:58,452 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,453 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 0d420ffea61e34799c9676e7af0f3a9e: Running coprocessor pre-open hook at 1733897638413Writing region info on filesystem at 1733897638413Initializing all the Stores at 1733897638415 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897638415Cleaning up temporary data from old regions at 1733897638428 (+13 ms)Running coprocessor post-open hooks at 1733897638452 (+24 ms)Region opened successfully at 1733897638453 (+1 ms) 2024-12-11T06:13:58,455 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e., pid=118, masterSystemTime=1733897638408 2024-12-11T06:13:58,461 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:13:58,463 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 3ca44cdf05d63e72dd38ad533ec63381; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65742472, jitterRate=-0.020360827445983887}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:13:58,463 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,463 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 3ca44cdf05d63e72dd38ad533ec63381: Running coprocessor pre-open hook at 1733897638413Writing region info on filesystem at 1733897638414 (+1 ms)Initializing all the Stores at 1733897638414Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897638415 (+1 ms)Cleaning up temporary data from old regions at 1733897638437 (+22 ms)Running coprocessor post-open hooks at 1733897638463 (+26 ms)Region opened successfully at 1733897638463 2024-12-11T06:13:58,464 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=0d420ffea61e34799c9676e7af0f3a9e, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:13:58,466 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:13:58,466 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 
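[Editor's note, not part of the log] Once both regions report "Opened" as above, a client can confirm the layout the test later logs as "Found 2 regions for table". A minimal sketch, assuming the same cluster configuration on the classpath (class name ListRegions is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ListRegions {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("testtb-testConsecutiveExports"))) {
          // Two locations are expected here, matching the regions opened on the two region servers above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
          }
        }
      }
    }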
2024-12-11T06:13:58,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:13:58,468 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381., pid=117, masterSystemTime=1733897638408 2024-12-11T06:13:58,474 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=3ca44cdf05d63e72dd38ad533ec63381, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:13:58,474 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=115 2024-12-11T06:13:58,474 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e, server=b66397bb9319,35939,1733897428441 in 212 msec 2024-12-11T06:13:58,475 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:13:58,475 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:13:58,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:13:58,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0d420ffea61e34799c9676e7af0f3a9e, ASSIGN in 381 msec 2024-12-11T06:13:58,484 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=116 2024-12-11T06:13:58,484 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381, server=b66397bb9319,42579,1733897428663 in 224 msec 2024-12-11T06:13:58,491 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=114 2024-12-11T06:13:58,491 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3ca44cdf05d63e72dd38ad533ec63381, ASSIGN in 391 msec 2024-12-11T06:13:58,493 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:13:58,493 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897638493"}]},"ts":"1733897638493"} 2024-12-11T06:13:58,496 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-11T06:13:58,497 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:13:58,497 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-11T06:13:58,502 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-11T06:13:58,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:58,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:58,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:58,505 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:13:58,508 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:58,508 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:58,509 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:58,509 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-11T06:13:58,511 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 528 msec 2024-12-11T06:13:58,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-11T06:13:58,617 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T06:13:58,617 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T06:13:58,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-11T06:13:58,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:13:58,620 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:13:58,622 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T06:13:58,628 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T06:13:58,633 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T06:13:58,636 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T06:13:58,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897638636 (current time:1733897638636). 
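
The snapshot request logged just above ({ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }) is the master-side view of a client-side Admin.snapshot() call. The sketch below is not the test's own code, only a minimal illustration of the kind of client call that produces such a request; connection setup is assumed to point at the cluster shown in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml on the classpath points at the test cluster.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a snapshot of the enabled table (flush-type by default);
          // the master logs it as "{ ss=... table=... type=FLUSH ttl=0 }".
          admin.snapshot("emptySnaptb0-testConsecutiveExports",
              TableName.valueOf("testtb-testConsecutiveExports"));
        }
      }
    }
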
2024-12-11T06:13:58,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:13:58,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-11T06:13:58,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:13:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ef924e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:58,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:58,638 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:58,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:58,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:58,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31ce35c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:58,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:58,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:58,639 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:58,640 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39952, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:58,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14a685e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:58,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:58,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:58,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:58,643 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47644, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:58,645 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 2024-12-11T06:13:58,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:13:58,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:58,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:58,645 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:13:58,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1843d095, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:58,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:58,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:58,647 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:58,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:58,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:58,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50be348e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:58,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:58,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:58,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:58,648 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39968, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:58,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11bb3796, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:58,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:58,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:58,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:58,651 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47650, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T06:13:58,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:13:58,654 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 2024-12-11T06:13:58,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:13:58,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:58,655 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
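
Right after the stack trace above, the handler reads the table's ACL entry ("jenkins: RWXCA") back from hbase:acl so it can be attached to the snapshot description. Purely as an illustration, and not the server-side code path shown in the trace, the same grants can be inspected from a client with AccessControlClient; the table name below is the one appearing in the log.

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ReadTableAclSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Fetches the ACL entries stored for the table in hbase:acl; for this
          // run the list should include the "jenkins: RWXCA" grant read back above.
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testtb-testConsecutiveExports");
          perms.forEach(p -> System.out.println(p));
        }
      }
    }
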
2024-12-11T06:13:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-11T06:13:58,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T06:13:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T06:13:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-11T06:13:58,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T06:13:58,658 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:13:58,659 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:13:58,661 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:13:58,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742096_1272 (size=161) 2024-12-11T06:13:58,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742096_1272 (size=161) 2024-12-11T06:13:58,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742096_1272 (size=161) 2024-12-11T06:13:58,687 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:13:58,687 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381}] 2024-12-11T06:13:58,688 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, 
ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,689 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T06:13:58,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-11T06:13:58,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-11T06:13:58,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:13:58,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:13:58,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 3ca44cdf05d63e72dd38ad533ec63381: 2024-12-11T06:13:58,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 0d420ffea61e34799c9676e7af0f3a9e: 2024-12-11T06:13:58,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. for emptySnaptb0-testConsecutiveExports completed. 2024-12-11T06:13:58,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. for emptySnaptb0-testConsecutiveExports completed. 2024-12-11T06:13:58,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-11T06:13:58,841 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-11T06:13:58,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:58,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:58,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:13:58,842 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:13:58,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742097_1273 (size=68) 2024-12-11T06:13:58,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742098_1274 (size=68) 2024-12-11T06:13:58,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742097_1273 (size=68) 2024-12-11T06:13:58,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742098_1274 (size=68) 2024-12-11T06:13:58,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742098_1274 (size=68) 2024-12-11T06:13:58,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742097_1273 (size=68) 2024-12-11T06:13:58,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:13:58,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-11T06:13:58,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 
2024-12-11T06:13:58,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-11T06:13:58,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-11T06:13:58,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-11T06:13:58,859 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,859 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:58,860 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:58,862 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e in 173 msec 2024-12-11T06:13:58,863 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-12-11T06:13:58,863 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381 in 174 msec 2024-12-11T06:13:58,863 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:13:58,864 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:13:58,865 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
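
The SNAPSHOT_SNAPSHOT_MOB_REGION state and the MobRegionSnapshotPool entries here (and the mobdir/.tmp flushes later in this run) indicate the 'cf' family is MOB-enabled. The exact descriptor used by the test is not shown in the log; under that assumption only, the sketch below illustrates how such a family is declared when the table is created (the threshold value is a placeholder, not taken from the log).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      // Builds a descriptor for a table whose single 'cf' family stores values as
      // MOB files, which is what produces the mobdir flush paths seen in this log.
      static TableDescriptor descriptor() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)
                .setMobThreshold(0L) // placeholder: send every cell to a MOB file
                .build())
            .build();
      }

      static void create(Admin admin) throws java.io.IOException {
        admin.createTable(descriptor());
      }
    }
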
2024-12-11T06:13:58,865 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:13:58,865 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:58,866 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T06:13:58,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742099_1275 (size=60) 2024-12-11T06:13:58,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742099_1275 (size=60) 2024-12-11T06:13:58,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742099_1275 (size=60) 2024-12-11T06:13:58,881 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:13:58,881 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-11T06:13:58,881 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-11T06:13:58,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742100_1276 (size=641) 2024-12-11T06:13:58,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742100_1276 (size=641) 2024-12-11T06:13:58,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742100_1276 (size=641) 2024-12-11T06:13:58,906 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:13:58,911 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:13:58,912 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-11T06:13:58,916 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:13:58,916 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-11T06:13:58,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 260 msec 2024-12-11T06:13:58,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-11T06:13:58,977 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T06:13:58,985 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:13:58,992 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:13:58,994 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T06:13:58,998 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-11T06:13:58,998 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 
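
The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are emitted when a client loads rows while skipping the write-ahead log. The snippet below is a generic illustration of such a write and not the test's own loader; the row key, qualifier, and value are made up.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      // Writes one cell while skipping the write-ahead log; the region server then
      // logs the "writing data to region ... with WAL disabled" warning seen above.
      static void writeWithoutWal(Connection conn) throws java.io.IOException {
        try (Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
          Put put = new Put(Bytes.toBytes("row-0")) // made-up row key
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }
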
2024-12-11T06:13:58,998 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:13:59,000 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T06:13:59,007 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T06:13:59,015 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-11T06:13:59,018 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T06:13:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897639018 (current time:1733897639018). 2024-12-11T06:13:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:13:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-11T06:13:59,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:13:59,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75fac5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:59,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:59,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:59,021 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:59,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:59,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:59,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78adf71e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-11T06:13:59,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:59,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:59,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:59,023 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39988, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:59,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f1b81a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:59,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:59,025 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:59,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:59,027 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47654, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:59,028 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:13:59,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:13:59,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:59,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:59,028 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:13:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e9fa785, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:13:59,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:13:59,030 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:13:59,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:13:59,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:13:59,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@716ab331, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:59,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:13:59,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:13:59,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:59,031 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40012, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:13:59,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50bc454b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:13:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:13:59,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:13:59,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:13:59,034 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47656, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:13:59,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:13:59,037 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:13:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:13:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:13:59,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-11T06:13:59,037 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:13:59,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
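
The snapshot name (snaptb0-testConsecutiveExports) and the test name suggest the snapshots taken here are subsequently exported, although the export itself is not part of this excerpt. Purely as a hedged illustration, an export of a completed snapshot is normally driven through the ExportSnapshot tool; the flag names follow the HBase reference guide, and the destination URI below is a placeholder rather than a path from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and referenced hfiles to another filesystem;
        // "-copy-to" here is a placeholder destination, not a path from the log.
        int rc = ToolRunner.run(conf, new ExportSnapshot(),
            new String[] { "-snapshot", "snaptb0-testConsecutiveExports",
                           "-copy-to", "file:///tmp/snapshot-export" });
        System.exit(rc);
      }
    }
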
2024-12-11T06:13:59,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-11T06:13:59,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-11T06:13:59,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T06:13:59,040 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:13:59,041 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:13:59,043 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:13:59,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742101_1277 (size=156) 2024-12-11T06:13:59,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742101_1277 (size=156) 2024-12-11T06:13:59,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742101_1277 (size=156) 2024-12-11T06:13:59,054 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:13:59,055 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381}] 2024-12-11T06:13:59,056 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:59,056 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:59,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T06:13:59,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-11T06:13:59,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-11T06:13:59,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:13:59,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:13:59,208 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 3ca44cdf05d63e72dd38ad533ec63381 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-11T06:13:59,208 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 0d420ffea61e34799c9676e7af0f3a9e 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-11T06:13:59,228 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c622e99a259f46b6be877d0fd33ce207_0d420ffea61e34799c9676e7af0f3a9e is 71, key is 0064b3af1d50cefb0ba70f381d1348d6/cf:q/1733897638985/Put/seqid=0 2024-12-11T06:13:59,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121105b8ff1b2622405b83661c60c30ebd61_3ca44cdf05d63e72dd38ad533ec63381 is 71, key is 141d42f6e8e7b9decb959468c42d6a29/cf:q/1733897638992/Put/seqid=0 2024-12-11T06:13:59,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742102_1278 (size=5102) 2024-12-11T06:13:59,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742103_1279 (size=8172) 2024-12-11T06:13:59,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742102_1278 (size=5102) 2024-12-11T06:13:59,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742103_1279 (size=8172) 2024-12-11T06:13:59,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742102_1278 (size=5102) 2024-12-11T06:13:59,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to 
blk_1073742103_1279 (size=8172) 2024-12-11T06:13:59,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:59,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:59,244 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c622e99a259f46b6be877d0fd33ce207_0d420ffea61e34799c9676e7af0f3a9e to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241211c622e99a259f46b6be877d0fd33ce207_0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:59,244 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121105b8ff1b2622405b83661c60c30ebd61_3ca44cdf05d63e72dd38ad533ec63381 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024121105b8ff1b2622405b83661c60c30ebd61_3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:59,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/.tmp/cf/166106fd68014bdda133be0a0e6881be, store: [table=testtb-testConsecutiveExports family=cf region=0d420ffea61e34799c9676e7af0f3a9e] 2024-12-11T06:13:59,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/.tmp/cf/166106fd68014bdda133be0a0e6881be is 206, key is 0dbde34530c648586e0088a85400e725c/cf:q/1733897638985/Put/seqid=0 2024-12-11T06:13:59,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/.tmp/cf/de0cb93ac9f84eb4b02e89c5292f84e2, store: [table=testtb-testConsecutiveExports family=cf region=3ca44cdf05d63e72dd38ad533ec63381] 2024-12-11T06:13:59,246 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/.tmp/cf/de0cb93ac9f84eb4b02e89c5292f84e2 is 206, key is 106431b2be7b038f848bf8251eefedf2f/cf:q/1733897638992/Put/seqid=0 2024-12-11T06:13:59,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742105_1281 (size=14855) 2024-12-11T06:13:59,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742105_1281 (size=14855) 2024-12-11T06:13:59,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742104_1280 (size=5906) 2024-12-11T06:13:59,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742105_1281 (size=14855) 2024-12-11T06:13:59,274 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/.tmp/cf/de0cb93ac9f84eb4b02e89c5292f84e2 2024-12-11T06:13:59,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742104_1280 (size=5906) 2024-12-11T06:13:59,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742104_1280 (size=5906) 2024-12-11T06:13:59,276 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/.tmp/cf/166106fd68014bdda133be0a0e6881be 2024-12-11T06:13:59,282 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/.tmp/cf/166106fd68014bdda133be0a0e6881be as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/cf/166106fd68014bdda133be0a0e6881be 2024-12-11T06:13:59,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/.tmp/cf/de0cb93ac9f84eb4b02e89c5292f84e2 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/cf/de0cb93ac9f84eb4b02e89c5292f84e2 2024-12-11T06:13:59,292 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/cf/166106fd68014bdda133be0a0e6881be, entries=3, sequenceid=6, filesize=5.8 K 2024-12-11T06:13:59,293 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 0d420ffea61e34799c9676e7af0f3a9e in 85ms, sequenceid=6, compaction requested=false 2024-12-11T06:13:59,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-11T06:13:59,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 0d420ffea61e34799c9676e7af0f3a9e: 2024-12-11T06:13:59,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. for snaptb0-testConsecutiveExports completed. 2024-12-11T06:13:59,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-11T06:13:59,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:59,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/cf/166106fd68014bdda133be0a0e6881be] hfiles 2024-12-11T06:13:59,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/cf/166106fd68014bdda133be0a0e6881be for snapshot=snaptb0-testConsecutiveExports 2024-12-11T06:13:59,299 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/cf/de0cb93ac9f84eb4b02e89c5292f84e2, entries=47, sequenceid=6, filesize=14.5 K 2024-12-11T06:13:59,300 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 3ca44cdf05d63e72dd38ad533ec63381 in 92ms, sequenceid=6, compaction requested=false 2024-12-11T06:13:59,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status 
journal for 3ca44cdf05d63e72dd38ad533ec63381: 2024-12-11T06:13:59,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. for snaptb0-testConsecutiveExports completed. 2024-12-11T06:13:59,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-11T06:13:59,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:13:59,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/cf/de0cb93ac9f84eb4b02e89c5292f84e2] hfiles 2024-12-11T06:13:59,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/cf/de0cb93ac9f84eb4b02e89c5292f84e2 for snapshot=snaptb0-testConsecutiveExports 2024-12-11T06:13:59,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742106_1282 (size=107) 2024-12-11T06:13:59,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742106_1282 (size=107) 2024-12-11T06:13:59,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742106_1282 (size=107) 2024-12-11T06:13:59,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 
2024-12-11T06:13:59,319 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-11T06:13:59,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-11T06:13:59,320 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:59,320 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:59,323 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e in 266 msec 2024-12-11T06:13:59,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742107_1283 (size=107) 2024-12-11T06:13:59,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742107_1283 (size=107) 2024-12-11T06:13:59,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742107_1283 (size=107) 2024-12-11T06:13:59,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 
2024-12-11T06:13:59,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-11T06:13:59,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-11T06:13:59,349 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:59,349 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:59,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=124, resume processing ppid=122 2024-12-11T06:13:59,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381 in 296 msec 2024-12-11T06:13:59,352 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:13:59,353 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:13:59,354 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:13:59,355 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:13:59,355 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:13:59,356 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024121105b8ff1b2622405b83661c60c30ebd61_3ca44cdf05d63e72dd38ad533ec63381, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241211c622e99a259f46b6be877d0fd33ce207_0d420ffea61e34799c9676e7af0f3a9e] hfiles 2024-12-11T06:13:59,356 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024121105b8ff1b2622405b83661c60c30ebd61_3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:13:59,356 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241211c622e99a259f46b6be877d0fd33ce207_0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:13:59,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T06:13:59,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742108_1284 (size=291) 2024-12-11T06:13:59,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742108_1284 (size=291) 2024-12-11T06:13:59,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742108_1284 (size=291) 2024-12-11T06:13:59,366 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:13:59,366 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-11T06:13:59,367 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T06:13:59,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742109_1285 (size=951) 2024-12-11T06:13:59,375 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742109_1285 (size=951) 2024-12-11T06:13:59,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742109_1285 (size=951) 2024-12-11T06:13:59,381 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:13:59,388 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:13:59,389 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T06:13:59,390 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:13:59,390 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-11T06:13:59,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 352 msec 2024-12-11T06:13:59,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-11T06:13:59,667 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T06:13:59,667 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667 2024-12-11T06:13:59,667 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:59,697 DEBUG [Time-limited test 
{}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:13:59,697 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@6b7ca24a, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T06:13:59,699 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T06:13:59,703 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T06:13:59,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:59,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:13:59,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:00,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-4445423119164580984.jar 2024-12-11T06:14:00,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:00,725 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:00,796 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-464330237934993076.jar 2024-12-11T06:14:00,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:00,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:00,797 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:00,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:00,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:00,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:00,798 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T06:14:00,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T06:14:00,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T06:14:00,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T06:14:00,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T06:14:00,799 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T06:14:00,800 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T06:14:00,800 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T06:14:00,800 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T06:14:00,800 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T06:14:00,801 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T06:14:00,801 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:00,801 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:00,801 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:14:00,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:00,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:00,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:14:00,802 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
2024-12-11T06:14:00,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742110_1286 (size=131440) 2024-12-11T06:14:00,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742110_1286 (size=131440) 2024-12-11T06:14:00,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742110_1286 (size=131440) 2024-12-11T06:14:00,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742111_1287 (size=4188619) 2024-12-11T06:14:00,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742111_1287 (size=4188619) 2024-12-11T06:14:00,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742111_1287 (size=4188619) 2024-12-11T06:14:00,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742112_1288 (size=1323991) 2024-12-11T06:14:00,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742112_1288 (size=1323991) 2024-12-11T06:14:00,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742112_1288 (size=1323991) 2024-12-11T06:14:00,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742113_1289 (size=903542) 2024-12-11T06:14:00,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742113_1289 (size=903542) 2024-12-11T06:14:00,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742113_1289 (size=903542) 2024-12-11T06:14:00,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742114_1290 (size=8360360) 2024-12-11T06:14:00,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742114_1290 (size=8360360) 2024-12-11T06:14:00,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742114_1290 (size=8360360) 2024-12-11T06:14:00,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742115_1291 (size=1877034) 2024-12-11T06:14:00,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742115_1291 (size=1877034) 2024-12-11T06:14:00,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742115_1291 (size=1877034) 2024-12-11T06:14:00,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742116_1292 (size=77835) 2024-12-11T06:14:00,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to 
blk_1073742116_1292 (size=77835) 2024-12-11T06:14:00,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742116_1292 (size=77835) 2024-12-11T06:14:00,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742117_1293 (size=30949) 2024-12-11T06:14:00,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742117_1293 (size=30949) 2024-12-11T06:14:00,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742117_1293 (size=30949) 2024-12-11T06:14:01,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742118_1294 (size=1597213) 2024-12-11T06:14:01,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742118_1294 (size=1597213) 2024-12-11T06:14:01,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742118_1294 (size=1597213) 2024-12-11T06:14:01,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742119_1295 (size=443172) 2024-12-11T06:14:01,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742119_1295 (size=443172) 2024-12-11T06:14:01,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742119_1295 (size=443172) 2024-12-11T06:14:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742120_1296 (size=4695811) 2024-12-11T06:14:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742120_1296 (size=4695811) 2024-12-11T06:14:01,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742120_1296 (size=4695811) 2024-12-11T06:14:01,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742121_1297 (size=232957) 2024-12-11T06:14:01,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742121_1297 (size=232957) 2024-12-11T06:14:01,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742121_1297 (size=232957) 2024-12-11T06:14:01,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742122_1298 (size=127628) 2024-12-11T06:14:01,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742122_1298 (size=127628) 2024-12-11T06:14:01,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742122_1298 (size=127628) 2024-12-11T06:14:01,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is 
added to blk_1073742123_1299 (size=20406) 2024-12-11T06:14:01,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742123_1299 (size=20406) 2024-12-11T06:14:01,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742123_1299 (size=20406) 2024-12-11T06:14:01,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742124_1300 (size=6426220) 2024-12-11T06:14:01,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742124_1300 (size=6426220) 2024-12-11T06:14:01,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742124_1300 (size=6426220) 2024-12-11T06:14:01,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742125_1301 (size=5175431) 2024-12-11T06:14:01,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742125_1301 (size=5175431) 2024-12-11T06:14:01,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742125_1301 (size=5175431) 2024-12-11T06:14:01,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742126_1302 (size=217634) 2024-12-11T06:14:01,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742126_1302 (size=217634) 2024-12-11T06:14:01,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742126_1302 (size=217634) 2024-12-11T06:14:01,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742127_1303 (size=1832290) 2024-12-11T06:14:01,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742127_1303 (size=1832290) 2024-12-11T06:14:01,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742127_1303 (size=1832290) 2024-12-11T06:14:01,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742128_1304 (size=322274) 2024-12-11T06:14:01,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742128_1304 (size=322274) 2024-12-11T06:14:01,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742128_1304 (size=322274) 2024-12-11T06:14:01,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742129_1305 (size=503880) 2024-12-11T06:14:01,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742129_1305 (size=503880) 2024-12-11T06:14:01,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36519 is added to blk_1073742129_1305 (size=503880) 2024-12-11T06:14:01,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742130_1306 (size=29229) 2024-12-11T06:14:01,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742130_1306 (size=29229) 2024-12-11T06:14:01,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742130_1306 (size=29229) 2024-12-11T06:14:01,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742131_1307 (size=24096) 2024-12-11T06:14:01,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742131_1307 (size=24096) 2024-12-11T06:14:01,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742131_1307 (size=24096) 2024-12-11T06:14:01,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742132_1308 (size=111872) 2024-12-11T06:14:01,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742132_1308 (size=111872) 2024-12-11T06:14:01,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742132_1308 (size=111872) 2024-12-11T06:14:01,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742133_1309 (size=45609) 2024-12-11T06:14:01,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742133_1309 (size=45609) 2024-12-11T06:14:01,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742133_1309 (size=45609) 2024-12-11T06:14:01,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742134_1310 (size=136454) 2024-12-11T06:14:01,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742134_1310 (size=136454) 2024-12-11T06:14:01,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742134_1310 (size=136454) 2024-12-11T06:14:01,239 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-11T06:14:01,242 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-11T06:14:01,244 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.5 K 2024-12-11T06:14:01,244 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-11T06:14:01,244 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-11T06:14:01,244 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-11T06:14:01,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742135_1311 (size=1023) 2024-12-11T06:14:01,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742135_1311 (size=1023) 2024-12-11T06:14:01,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742135_1311 (size=1023) 2024-12-11T06:14:01,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742136_1312 (size=35) 2024-12-11T06:14:01,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742136_1312 (size=35) 2024-12-11T06:14:01,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742136_1312 (size=35) 2024-12-11T06:14:01,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742137_1313 (size=304039) 2024-12-11T06:14:01,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742137_1313 (size=304039) 2024-12-11T06:14:01,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742137_1313 (size=304039) 2024-12-11T06:14:01,468 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:14:01,468 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T06:14:01,473 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0004_000001 (auth:SIMPLE) from 127.0.0.1:45116 2024-12-11T06:14:01,486 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000001/launch_container.sh] 2024-12-11T06:14:01,486 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000001/container_tokens] 2024-12-11T06:14:01,487 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0004/container_1733897436125_0004_01_000001/sysfs] 2024-12-11T06:14:01,818 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0005_000001 (auth:SIMPLE) from 127.0.0.1:51010 2024-12-11T06:14:03,154 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:14:07,129 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0005_000001 (auth:SIMPLE) from 127.0.0.1:51024 2024-12-11T06:14:07,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742138_1314 (size=349737) 2024-12-11T06:14:07,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742138_1314 (size=349737) 2024-12-11T06:14:07,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742138_1314 (size=349737) 2024-12-11T06:14:08,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-11T06:14:08,055 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-11T06:14:09,411 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0005_000001 (auth:SIMPLE) from 127.0.0.1:49686 2024-12-11T06:14:09,411 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0005_000001 (auth:SIMPLE) from 127.0.0.1:38896 2024-12-11T06:14:10,233 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for 
appattempt_1733897436125_0005_000001 (auth:SIMPLE) from 127.0.0.1:38910 2024-12-11T06:14:10,240 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0005_000001 (auth:SIMPLE) from 127.0.0.1:49692 2024-12-11T06:14:12,472 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0005_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T06:14:16,756 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000003/launch_container.sh] 2024-12-11T06:14:16,757 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000003/container_tokens] 2024-12-11T06:14:16,757 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000003/sysfs] 2024-12-11T06:14:18,206 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000005/launch_container.sh] 2024-12-11T06:14:18,206 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000005/container_tokens] 2024-12-11T06:14:18,206 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000005/sysfs] 2024-12-11T06:14:18,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742139_1315 (size=31811) 2024-12-11T06:14:18,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742139_1315 (size=31811) 2024-12-11T06:14:18,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742139_1315 (size=31811) 2024-12-11T06:14:18,774 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742140_1316 (size=463) 2024-12-11T06:14:18,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742140_1316 (size=463) 2024-12-11T06:14:18,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742140_1316 (size=463) 2024-12-11T06:14:18,788 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000004/launch_container.sh] 2024-12-11T06:14:18,789 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000004/container_tokens] 2024-12-11T06:14:18,789 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000004/sysfs] 2024-12-11T06:14:18,867 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000002/launch_container.sh] 2024-12-11T06:14:18,867 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000002/container_tokens] 2024-12-11T06:14:18,867 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000002/sysfs] 2024-12-11T06:14:18,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742141_1317 (size=31811) 2024-12-11T06:14:18,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742141_1317 (size=31811) 2024-12-11T06:14:18,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742141_1317 (size=31811) 2024-12-11T06:14:18,937 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742142_1318 (size=349737) 2024-12-11T06:14:18,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742142_1318 (size=349737) 2024-12-11T06:14:18,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742142_1318 (size=349737) 2024-12-11T06:14:18,954 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0005_000001 (auth:SIMPLE) from 127.0.0.1:50728 2024-12-11T06:14:18,964 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0005_000001 (auth:SIMPLE) from 127.0.0.1:50738 2024-12-11T06:14:20,544 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T06:14:20,544 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T06:14:20,570 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-11T06:14:20,570 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T06:14:20,570 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T06:14:20,570 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T06:14:20,595 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T06:14:20,595 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T06:14:20,595 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@6b7ca24a in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T06:14:20,596 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T06:14:20,596 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T06:14:20,608 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:14:20,703 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:14:20,703 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@6b7ca24a, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T06:14:20,729 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T06:14:20,762 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-11T06:14:20,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:20,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:20,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:22,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-2614691904473648715.jar 2024-12-11T06:14:22,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:22,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:22,299 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-4941144794825347034.jar 2024-12-11T06:14:22,299 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:22,299 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:22,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:22,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:22,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:22,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:22,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T06:14:22,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T06:14:22,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T06:14:22,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T06:14:22,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T06:14:22,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T06:14:22,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T06:14:22,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T06:14:22,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T06:14:22,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T06:14:22,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T06:14:22,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:22,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:22,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:14:22,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:22,304 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:22,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:14:22,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:14:22,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742143_1319 (size=131440) 2024-12-11T06:14:22,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742143_1319 (size=131440) 2024-12-11T06:14:22,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742143_1319 (size=131440) 2024-12-11T06:14:22,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742144_1320 (size=4188619) 2024-12-11T06:14:22,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742144_1320 (size=4188619) 2024-12-11T06:14:22,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742144_1320 (size=4188619) 2024-12-11T06:14:22,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742145_1321 (size=1323991) 2024-12-11T06:14:22,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742145_1321 (size=1323991) 2024-12-11T06:14:22,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742145_1321 (size=1323991) 2024-12-11T06:14:22,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742146_1322 (size=903542) 2024-12-11T06:14:22,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742146_1322 (size=903542) 2024-12-11T06:14:22,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742146_1322 (size=903542) 2024-12-11T06:14:22,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742147_1323 (size=8360360) 2024-12-11T06:14:22,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742147_1323 (size=8360360) 2024-12-11T06:14:22,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to 
blk_1073742147_1323 (size=8360360) 2024-12-11T06:14:22,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742148_1324 (size=1877034) 2024-12-11T06:14:22,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742148_1324 (size=1877034) 2024-12-11T06:14:22,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742148_1324 (size=1877034) 2024-12-11T06:14:22,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742149_1325 (size=77835) 2024-12-11T06:14:22,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742149_1325 (size=77835) 2024-12-11T06:14:22,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742149_1325 (size=77835) 2024-12-11T06:14:22,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742150_1326 (size=30949) 2024-12-11T06:14:22,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742150_1326 (size=30949) 2024-12-11T06:14:22,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742150_1326 (size=30949) 2024-12-11T06:14:22,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742151_1327 (size=1597213) 2024-12-11T06:14:22,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742151_1327 (size=1597213) 2024-12-11T06:14:22,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742151_1327 (size=1597213) 2024-12-11T06:14:22,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742152_1328 (size=4695811) 2024-12-11T06:14:22,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742152_1328 (size=4695811) 2024-12-11T06:14:22,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742152_1328 (size=4695811) 2024-12-11T06:14:22,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742153_1329 (size=232957) 2024-12-11T06:14:22,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742153_1329 (size=232957) 2024-12-11T06:14:22,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742153_1329 (size=232957) 2024-12-11T06:14:22,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742154_1330 (size=127628) 2024-12-11T06:14:22,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is 
added to blk_1073742154_1330 (size=127628) 2024-12-11T06:14:22,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742154_1330 (size=127628) 2024-12-11T06:14:22,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742155_1331 (size=20406) 2024-12-11T06:14:22,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742155_1331 (size=20406) 2024-12-11T06:14:22,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742155_1331 (size=20406) 2024-12-11T06:14:22,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742156_1332 (size=5175431) 2024-12-11T06:14:22,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742156_1332 (size=5175431) 2024-12-11T06:14:22,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742156_1332 (size=5175431) 2024-12-11T06:14:22,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742157_1333 (size=217634) 2024-12-11T06:14:22,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742157_1333 (size=217634) 2024-12-11T06:14:22,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742157_1333 (size=217634) 2024-12-11T06:14:22,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742158_1334 (size=1832290) 2024-12-11T06:14:22,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742158_1334 (size=1832290) 2024-12-11T06:14:22,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742158_1334 (size=1832290) 2024-12-11T06:14:22,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742159_1335 (size=322274) 2024-12-11T06:14:22,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742159_1335 (size=322274) 2024-12-11T06:14:22,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742159_1335 (size=322274) 2024-12-11T06:14:23,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742160_1336 (size=503880) 2024-12-11T06:14:23,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742160_1336 (size=503880) 2024-12-11T06:14:23,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742160_1336 (size=503880) 2024-12-11T06:14:23,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37875 is added to blk_1073742161_1337 (size=443172) 2024-12-11T06:14:23,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742161_1337 (size=443172) 2024-12-11T06:14:23,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742161_1337 (size=443172) 2024-12-11T06:14:23,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742162_1338 (size=29229) 2024-12-11T06:14:23,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742162_1338 (size=29229) 2024-12-11T06:14:23,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742162_1338 (size=29229) 2024-12-11T06:14:23,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742163_1339 (size=6426220) 2024-12-11T06:14:23,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742163_1339 (size=6426220) 2024-12-11T06:14:23,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742163_1339 (size=6426220) 2024-12-11T06:14:23,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742164_1340 (size=24096) 2024-12-11T06:14:23,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742164_1340 (size=24096) 2024-12-11T06:14:23,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742164_1340 (size=24096) 2024-12-11T06:14:23,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742165_1341 (size=111872) 2024-12-11T06:14:23,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742165_1341 (size=111872) 2024-12-11T06:14:23,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742165_1341 (size=111872) 2024-12-11T06:14:23,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742166_1342 (size=45609) 2024-12-11T06:14:23,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742166_1342 (size=45609) 2024-12-11T06:14:23,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742166_1342 (size=45609) 2024-12-11T06:14:23,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742167_1343 (size=136454) 2024-12-11T06:14:23,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742167_1343 (size=136454) 2024-12-11T06:14:23,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36519 is added to blk_1073742167_1343 (size=136454) 2024-12-11T06:14:23,239 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T06:14:23,241 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-11T06:14:23,243 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.5 K 2024-12-11T06:14:23,243 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-11T06:14:23,243 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-11T06:14:23,243 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-11T06:14:23,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742168_1344 (size=1023) 2024-12-11T06:14:23,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742168_1344 (size=1023) 2024-12-11T06:14:23,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742168_1344 (size=1023) 2024-12-11T06:14:23,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742169_1345 (size=35) 2024-12-11T06:14:23,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742169_1345 (size=35) 2024-12-11T06:14:23,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742169_1345 (size=35) 2024-12-11T06:14:23,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742170_1346 (size=304041) 2024-12-11T06:14:23,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742170_1346 (size=304041) 2024-12-11T06:14:23,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742170_1346 (size=304041) 2024-12-11T06:14:25,061 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:14:25,061 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T06:14:25,066 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0005_000001 (auth:SIMPLE) from 127.0.0.1:46260 2024-12-11T06:14:25,081 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000001/launch_container.sh] 2024-12-11T06:14:25,081 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000001/container_tokens] 2024-12-11T06:14:25,081 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0005/container_1733897436125_0005_01_000001/sysfs] 2024-12-11T06:14:25,827 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0006_000001 (auth:SIMPLE) from 127.0.0.1:55114 2024-12-11T06:14:26,296 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
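The mapreduce.TableMapReduceUtil(972) entries a few lines up record which local jar each dependency class resolves to before the export job ships them to the cluster. As a rough illustration of that lookup (not the actual TableMapReduceUtil code), the class-to-jar association can be read from a class's protection domain; the helper and class names below are placeholders:

    // Illustrative sketch only: map a class back to the jar (or directory) it was
    // loaded from, the same association the "For class X, using jar Y" lines report.
    import java.security.CodeSource;

    public class JarForClass {
        static String jarOf(Class<?> clazz) {
            CodeSource src = clazz.getProtectionDomain().getCodeSource();
            // Classes on the bootstrap classpath (e.g. java.lang.String) have no CodeSource.
            return src == null ? "(bootstrap classpath)" : src.getLocation().toString();
        }

        public static void main(String[] args) {
            // In the test environment one would pass e.g. org.apache.hadoop.io.Text.class;
            // this class itself is used here so the example compiles with no extra jars.
            System.out.println(jarOf(JarForClass.class));
        }
    }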
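The FsDatasetAsyncDiskServiceFixer entry immediately above comes from a test helper that reaches into a Hadoop-internal class via reflection and tolerates the field having been removed in newer Hadoop releases (HBASE-27595). A minimal sketch of that pattern, with the field name taken from the log line and everything else hypothetical:

    import java.lang.reflect.Field;

    final class ThreadGroupFieldProbe {
        // Returns the private "threadGroup" field if the given class still declares it,
        // or null when the field was removed upstream, which is the situation that
        // produces a NoSuchFieldException message like the one logged above.
        static Field find(Class<?> asyncDiskServiceClass) {
            try {
                Field f = asyncDiskServiceClass.getDeclaredField("threadGroup");
                f.setAccessible(true);
                return f;
            } catch (NoSuchFieldException e) {
                return null; // caller logs and skips the fix-up, as HBaseTestingUtil does here
            }
        }
    }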
2024-12-11T06:14:30,916 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0006_000001 (auth:SIMPLE) from 127.0.0.1:60922 2024-12-11T06:14:31,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742171_1347 (size=349739) 2024-12-11T06:14:31,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742171_1347 (size=349739) 2024-12-11T06:14:31,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742171_1347 (size=349739) 2024-12-11T06:14:32,022 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 0d420ffea61e34799c9676e7af0f3a9e changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:14:32,022 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 3ca44cdf05d63e72dd38ad533ec63381 changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:14:33,199 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0006_000001 (auth:SIMPLE) from 127.0.0.1:37340 2024-12-11T06:14:33,200 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0006_000001 (auth:SIMPLE) from 127.0.0.1:36756 2024-12-11T06:14:34,012 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0006_000001 (auth:SIMPLE) from 127.0.0.1:37356 2024-12-11T06:14:34,013 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0006_000001 (auth:SIMPLE) from 127.0.0.1:36762 2024-12-11T06:14:36,068 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0006_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T06:14:41,349 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000005/launch_container.sh] 2024-12-11T06:14:41,349 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000005/container_tokens] 2024-12-11T06:14:41,349 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000005/sysfs] 2024-12-11T06:14:41,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742172_1348 (size=29750) 2024-12-11T06:14:41,961 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742172_1348 (size=29750) 2024-12-11T06:14:41,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742172_1348 (size=29750) 2024-12-11T06:14:41,982 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000002/launch_container.sh] 2024-12-11T06:14:41,994 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000002/container_tokens] 2024-12-11T06:14:41,994 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000002/sysfs] 2024-12-11T06:14:42,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742173_1349 (size=463) 2024-12-11T06:14:42,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742173_1349 (size=463) 2024-12-11T06:14:42,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742173_1349 (size=463) 2024-12-11T06:14:42,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742174_1350 (size=29750) 2024-12-11T06:14:42,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742174_1350 (size=29750) 2024-12-11T06:14:42,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742174_1350 (size=29750) 2024-12-11T06:14:42,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742175_1351 (size=349739) 2024-12-11T06:14:42,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742175_1351 (size=349739) 2024-12-11T06:14:42,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742175_1351 (size=349739) 2024-12-11T06:14:43,413 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3ca44cdf05d63e72dd38ad533ec63381, had cached 0 bytes from a total of 14855 2024-12-11T06:14:43,413 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0d420ffea61e34799c9676e7af0f3a9e, had cached 0 bytes from a 
total of 5906 2024-12-11T06:14:43,701 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T06:14:43,702 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T06:14:43,722 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-11T06:14:43,722 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T06:14:43,723 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T06:14:43,723 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T06:14:43,741 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T06:14:43,741 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T06:14:43,741 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@6b7ca24a in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-11T06:14:43,742 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-11T06:14:43,742 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897639667/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-11T06:14:43,783 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-11T06:14:43,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-11T06:14:43,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-11T06:14:43,788 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897683788"}]},"ts":"1733897683788"} 2024-12-11T06:14:43,791 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-11T06:14:43,791 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-11T06:14:43,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-11T06:14:43,795 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0d420ffea61e34799c9676e7af0f3a9e, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3ca44cdf05d63e72dd38ad533ec63381, UNASSIGN}] 2024-12-11T06:14:43,796 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0d420ffea61e34799c9676e7af0f3a9e, UNASSIGN 2024-12-11T06:14:43,796 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3ca44cdf05d63e72dd38ad533ec63381, UNASSIGN 2024-12-11T06:14:43,797 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=0d420ffea61e34799c9676e7af0f3a9e, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:14:43,798 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=3ca44cdf05d63e72dd38ad533ec63381, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:14:43,800 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0d420ffea61e34799c9676e7af0f3a9e, UNASSIGN because future has completed 2024-12-11T06:14:43,800 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:14:43,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:14:43,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3ca44cdf05d63e72dd38ad533ec63381, UNASSIGN because future has completed 2024-12-11T06:14:43,802 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 
2024-12-11T06:14:43,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:14:43,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-11T06:14:43,954 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:14:43,955 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:14:43,955 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 0d420ffea61e34799c9676e7af0f3a9e, disabling compactions & flushes 2024-12-11T06:14:43,955 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:14:43,955 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:14:43,955 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. after waiting 0 ms 2024-12-11T06:14:43,955 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 2024-12-11T06:14:43,956 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:14:43,956 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:14:43,956 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing 3ca44cdf05d63e72dd38ad533ec63381, disabling compactions & flushes 2024-12-11T06:14:43,956 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:14:43,957 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:14:43,957 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 
after waiting 0 ms 2024-12-11T06:14:43,957 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:14:44,019 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:14:44,020 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:14:44,020 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381. 2024-12-11T06:14:44,020 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for 3ca44cdf05d63e72dd38ad533ec63381: Waiting for close lock at 1733897683956Running coprocessor pre-close hooks at 1733897683956Disabling compacts and flushes for region at 1733897683956Disabling writes for close at 1733897683957 (+1 ms)Writing region close event to WAL at 1733897683968 (+11 ms)Running coprocessor post-close hooks at 1733897684019 (+51 ms)Closed at 1733897684020 (+1 ms) 2024-12-11T06:14:44,023 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed 3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:14:44,024 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=3ca44cdf05d63e72dd38ad533ec63381, regionState=CLOSED 2024-12-11T06:14:44,026 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:14:44,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:14:44,027 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:14:44,027 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e. 
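Each "Region close journal" entry above concatenates every close step with its timestamp and "(+N ms)" delta into a single string. For anyone mining these logs, a small, hypothetical helper (not part of HBase) can split such a journal into per-step timings; the journal text below is copied from the 3ca44cdf05d63e72dd38ad533ec63381 entry above:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class CloseJournalParser {
        // Each step looks like "<description> at <13-digit epoch millis>" optionally
        // followed by " (+N ms)".
        private static final Pattern STEP =
            Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

        public static void main(String[] args) {
            String journal = "Waiting for close lock at 1733897683956"
                + "Running coprocessor pre-close hooks at 1733897683956"
                + "Disabling compacts and flushes for region at 1733897683956"
                + "Disabling writes for close at 1733897683957 (+1 ms)"
                + "Writing region close event to WAL at 1733897683968 (+11 ms)"
                + "Running coprocessor post-close hooks at 1733897684019 (+51 ms)"
                + "Closed at 1733897684020 (+1 ms)";
            Matcher m = STEP.matcher(journal);
            while (m.find()) {
                String delta = m.group(3) == null ? "0" : m.group(3);
                System.out.printf("%-45s ts=%s +%s ms%n", m.group(1).trim(), m.group(2), delta);
            }
        }
    }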
2024-12-11T06:14:44,027 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 0d420ffea61e34799c9676e7af0f3a9e: Waiting for close lock at 1733897683955Running coprocessor pre-close hooks at 1733897683955Disabling compacts and flushes for region at 1733897683955Disabling writes for close at 1733897683955Writing region close event to WAL at 1733897683956 (+1 ms)Running coprocessor post-close hooks at 1733897684027 (+71 ms)Closed at 1733897684027 2024-12-11T06:14:44,031 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-12-11T06:14:44,031 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 3ca44cdf05d63e72dd38ad533ec63381, server=b66397bb9319,42579,1733897428663 in 225 msec 2024-12-11T06:14:44,031 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:14:44,032 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=0d420ffea61e34799c9676e7af0f3a9e, regionState=CLOSED 2024-12-11T06:14:44,033 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3ca44cdf05d63e72dd38ad533ec63381, UNASSIGN in 236 msec 2024-12-11T06:14:44,036 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:14:44,044 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-12-11T06:14:44,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 0d420ffea61e34799c9676e7af0f3a9e, server=b66397bb9319,35939,1733897428441 in 241 msec 2024-12-11T06:14:44,048 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=127, resume processing ppid=126 2024-12-11T06:14:44,048 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=0d420ffea61e34799c9676e7af0f3a9e, UNASSIGN in 250 msec 2024-12-11T06:14:44,051 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-11T06:14:44,051 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 257 msec 2024-12-11T06:14:44,053 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897684053"}]},"ts":"1733897684053"} 2024-12-11T06:14:44,058 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-11T06:14:44,058 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 
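Procedures 125 through 130 above implement a client-initiated disable of testtb-testConsecutiveExports, and the delete that follows a few lines below starts DeleteTableProcedure pid=131. As a minimal sketch of the client-side calls that drive this flow, using the standard Admin API (the actual test goes through TestExportSnapshot helpers that are not shown in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableThenDelete {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("testtb-testConsecutiveExports");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);   // master runs DisableTableProcedure (pid=125 above)
                }
                admin.deleteTable(table);        // master runs DeleteTableProcedure (pid=131 below)
            }
        }
    }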
2024-12-11T06:14:44,062 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 276 msec 2024-12-11T06:14:44,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-11T06:14:44,108 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T06:14:44,109 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-11T06:14:44,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T06:14:44,112 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T06:14:44,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-11T06:14:44,115 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T06:14:44,117 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-11T06:14:44,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T06:14:44,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T06:14:44,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T06:14:44,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T06:14:44,121 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-11T06:14:44,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-11T06:14:44,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 
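After the DISABLE completes, the same client immediately requests the delete (stored above as pid=131, DeleteTableProcedure), and later in the log the two snapshots emptySnaptb0-/snaptb0-testConsecutiveExports are dropped as well. A sketch of the corresponding Admin calls (the table must already be disabled; this is illustrative, not the test's own code):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class DropTableAndSnapshotsSketch {
  static void cleanup(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testConsecutiveExports");
    // Triggers the DeleteTableProcedure states seen in the log:
    // DELETE_TABLE_PRE_OPERATION -> CLEAR_FS_LAYOUT -> REMOVE_FROM_META -> ...
    admin.deleteTable(table);
    // Matches the "delete name: ... type: DISABLED" requests and the
    // SnapshotManager "Deleting snapshot" lines later in the log.
    admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
    admin.deleteSnapshot("snaptb0-testConsecutiveExports");
  }
}
```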
2024-12-11T06:14:44,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-11T06:14:44,123 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T06:14:44,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T06:14:44,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:44,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:44,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T06:14:44,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:44,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-11T06:14:44,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:44,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-11T06:14:44,128 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:14:44,128 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:14:44,130 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/recovered.edits] 2024-12-11T06:14:44,131 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
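The ZKWatcher lines above show the master and every region server receiving NodeDataChanged, NodeDeleted and NodeChildrenChanged events for /hbase/acl/testtb-testConsecutiveExports as the table's ACL znode is removed. A minimal, generic sketch of observing such znode events with the plain ZooKeeper client (illustrative only; HBase uses its own ZKWatcher wrapper, and the connect string 127.0.0.1:62516 is taken from the log):

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class AclZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connect string taken from the quorum shown in the log above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62516", 30000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // NodeDataChanged / NodeDeleted / NodeChildrenChanged arrive here,
        // mirroring the ZKWatcher(609) lines in the log. ZooKeeper watches
        // are one-shot and must be re-registered after each event.
        System.out.println("Received event " + event.getType() + " for " + event.getPath());
      }
    });
    // Register watches: one on the ACL parent for child changes,
    // one on the table znode for data changes / deletion.
    zk.getChildren("/hbase/acl", true);
    zk.exists("/hbase/acl/testtb-testConsecutiveExports", true);
    Thread.sleep(60_000);   // keep the session alive long enough to see events
    zk.close();
  }
}
```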
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/recovered.edits] 2024-12-11T06:14:44,138 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/cf/de0cb93ac9f84eb4b02e89c5292f84e2 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/cf/de0cb93ac9f84eb4b02e89c5292f84e2 2024-12-11T06:14:44,137 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/cf/166106fd68014bdda133be0a0e6881be to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/cf/166106fd68014bdda133be0a0e6881be 2024-12-11T06:14:44,158 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381/recovered.edits/9.seqid 2024-12-11T06:14:44,159 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:14:44,170 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e/recovered.edits/9.seqid 2024-12-11T06:14:44,171 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testConsecutiveExports/0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:14:44,171 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-11T06:14:44,172 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-11T06:14:44,173 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-12-11T06:14:44,194 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): 
Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024121105b8ff1b2622405b83661c60c30ebd61_3ca44cdf05d63e72dd38ad533ec63381 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024121105b8ff1b2622405b83661c60c30ebd61_3ca44cdf05d63e72dd38ad533ec63381 2024-12-11T06:14:44,201 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241211c622e99a259f46b6be877d0fd33ce207_0d420ffea61e34799c9676e7af0f3a9e to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241211c622e99a259f46b6be877d0fd33ce207_0d420ffea61e34799c9676e7af0f3a9e 2024-12-11T06:14:44,202 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-11T06:14:44,205 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T06:14:44,209 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-11T06:14:44,212 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-11T06:14:44,213 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T06:14:44,214 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testConsecutiveExports' from region states. 
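The HFileArchiver lines show how the delete clears the filesystem layout: store files, MOB files and recovered.edits are not removed outright but moved from .../data/default/&lt;table&gt;/&lt;region&gt;/... to the parallel .../archive/data/default/&lt;table&gt;/&lt;region&gt;/... tree, and only then are the emptied region directories deleted. A rough sketch of that path mapping with the Hadoop FileSystem API (illustrative only, not the HFileArchiver implementation; the paths are copied from the log):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveLayoutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address taken from the log above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42173"), conf);

    String root = "/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b";
    // A store file under the live data tree ...
    Path src = new Path(root + "/data/default/testtb-testConsecutiveExports/"
        + "0d420ffea61e34799c9676e7af0f3a9e/cf/166106fd68014bdda133be0a0e6881be");
    // ... moves to the same relative location under archive/.
    Path dst = new Path(root + "/archive/data/default/testtb-testConsecutiveExports/"
        + "0d420ffea61e34799c9676e7af0f3a9e/cf/166106fd68014bdda133be0a0e6881be");

    fs.mkdirs(dst.getParent());           // ensure the archive cf directory exists
    boolean moved = fs.rename(src, dst);  // "Archived from ... to ..." in the log
    System.out.println("archived=" + moved);
  }
}
```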
2024-12-11T06:14:44,214 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897684214"}]},"ts":"9223372036854775807"} 2024-12-11T06:14:44,214 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897684214"}]},"ts":"9223372036854775807"} 2024-12-11T06:14:44,216 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T06:14:44,216 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0d420ffea61e34799c9676e7af0f3a9e, NAME => 'testtb-testConsecutiveExports,,1733897637980.0d420ffea61e34799c9676e7af0f3a9e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3ca44cdf05d63e72dd38ad533ec63381, NAME => 'testtb-testConsecutiveExports,1,1733897637980.3ca44cdf05d63e72dd38ad533ec63381.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T06:14:44,216 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testConsecutiveExports' as deleted. 2024-12-11T06:14:44,217 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897684216"}]},"ts":"9223372036854775807"} 2024-12-11T06:14:44,218 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-11T06:14:44,219 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-11T06:14:44,221 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 110 msec 2024-12-11T06:14:44,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-11T06:14:44,227 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-11T06:14:44,227 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-11T06:14:44,236 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-11T06:14:44,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-11T06:14:44,240 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-11T06:14:44,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-11T06:14:44,269 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=813 
(was 798) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45501 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:43910 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Thread-5394 java.base@17.0.11/java.io.FileInputStream.readBytes(Native 
Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-683469906_1 at /127.0.0.1:46788 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:38184 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:45501 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42259 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45391 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 12845) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:60078 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-683469906_1 at /127.0.0.1:52434 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=777 (was 783), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=877 (was 818) - SystemLoadAverage LEAK? 
-, ProcessCount=23 (was 24), AvailableMemoryMB=1302 (was 1649) 2024-12-11T06:14:44,269 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-12-11T06:14:44,290 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=813, OpenFileDescriptor=777, MaxFileDescriptor=1048576, SystemLoadAverage=877, ProcessCount=23, AvailableMemoryMB=1300 2024-12-11T06:14:44,290 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-12-11T06:14:44,292 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:14:44,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:44,295 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:14:44,295 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-11T06:14:44,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T06:14:44,297 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:14:44,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742176_1352 (size=458) 2024-12-11T06:14:44,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742176_1352 (size=458) 2024-12-11T06:14:44,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742176_1352 (size=458) 2024-12-11T06:14:44,342 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 99b5060b61ba875b95902ed3a94eda94, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:14:44,343 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => f89cd9328508cc3f2890bb3484b5a573, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:14:44,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742177_1353 (size=83) 2024-12-11T06:14:44,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742177_1353 (size=83) 2024-12-11T06:14:44,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742178_1354 (size=83) 2024-12-11T06:14:44,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742177_1353 (size=83) 2024-12-11T06:14:44,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742178_1354 (size=83) 2024-12-11T06:14:44,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742178_1354 (size=83) 2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 99b5060b61ba875b95902ed3a94eda94, disabling compactions & flushes 2024-12-11T06:14:44,372 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing f89cd9328508cc3f2890bb3484b5a573, disabling compactions & flushes 2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:14:44,372 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. after waiting 0 ms 2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. after waiting 0 ms 2024-12-11T06:14:44,372 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 2024-12-11T06:14:44,372 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 
2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 99b5060b61ba875b95902ed3a94eda94: Waiting for close lock at 1733897684372Disabling compacts and flushes for region at 1733897684372Disabling writes for close at 1733897684372Writing region close event to WAL at 1733897684372Closed at 1733897684372 2024-12-11T06:14:44,372 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for f89cd9328508cc3f2890bb3484b5a573: Waiting for close lock at 1733897684372Disabling compacts and flushes for region at 1733897684372Disabling writes for close at 1733897684372Writing region close event to WAL at 1733897684372Closed at 1733897684372 2024-12-11T06:14:44,374 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:14:44,374 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733897684374"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897684374"}]},"ts":"1733897684374"} 2024-12-11T06:14:44,375 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733897684374"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897684374"}]},"ts":"1733897684374"} 2024-12-11T06:14:44,378 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
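The CreateTableProcedure above was started by a client request whose table descriptor (logged at the create line) enables MOB on the single 'cf' family with MOB_THRESHOLD => '0' and VERSIONS => '1', and the table is pre-split at key '1', giving the two regions ('', '1') and ('1', ''). A client-side sketch that would produce an equivalent request (assumes an open Admin; this is not the test's own code):

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateMobTableSketch {
  static void createTable(Admin admin) throws Exception {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)     // IS_MOB => 'true'
        .setMobThreshold(0L)     // MOB_THRESHOLD => '0' (every cell stored as MOB)
        .setMaxVersions(1)       // VERSIONS => '1'
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
        .setColumnFamily(cf)
        .build();

    // Split key '1' yields the two regions seen in the log:
    // ('' .. '1') and ('1' .. '').
    byte[][] splitKeys = { Bytes.toBytes("1") };
    admin.createTable(td, splitKeys);
  }
}
```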
2024-12-11T06:14:44,379 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:14:44,379 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897684379"}]},"ts":"1733897684379"} 2024-12-11T06:14:44,383 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-11T06:14:44,384 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:14:44,385 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:14:44,385 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:14:44,385 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:14:44,385 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:14:44,385 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:14:44,385 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:14:44,385 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:14:44,385 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:14:44,385 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:14:44,386 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:14:44,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=99b5060b61ba875b95902ed3a94eda94, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f89cd9328508cc3f2890bb3484b5a573, ASSIGN}] 2024-12-11T06:14:44,388 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=99b5060b61ba875b95902ed3a94eda94, ASSIGN 2024-12-11T06:14:44,389 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f89cd9328508cc3f2890bb3484b5a573, ASSIGN 2024-12-11T06:14:44,389 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=99b5060b61ba875b95902ed3a94eda94, ASSIGN; state=OFFLINE, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 
2024-12-11T06:14:44,390 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f89cd9328508cc3f2890bb3484b5a573, ASSIGN; state=OFFLINE, location=b66397bb9319,44539,1733897428597; forceNewPlan=false, retain=false 2024-12-11T06:14:44,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T06:14:44,416 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000003/launch_container.sh] 2024-12-11T06:14:44,416 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000003/container_tokens] 2024-12-11T06:14:44,416 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000003/sysfs] 2024-12-11T06:14:44,540 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T06:14:44,540 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=f89cd9328508cc3f2890bb3484b5a573, regionState=OPENING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:14:44,541 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=99b5060b61ba875b95902ed3a94eda94, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:14:44,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f89cd9328508cc3f2890bb3484b5a573, ASSIGN because future has completed 2024-12-11T06:14:44,545 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure f89cd9328508cc3f2890bb3484b5a573, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:14:44,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=99b5060b61ba875b95902ed3a94eda94, ASSIGN because future has completed 2024-12-11T06:14:44,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99b5060b61ba875b95902ed3a94eda94, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:14:44,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T06:14:44,703 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:14:44,704 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => 99b5060b61ba875b95902ed3a94eda94, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:14:44,704 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. service=AccessControlService 2024-12-11T06:14:44,705 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:14:44,705 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,705 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:14:44,705 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,705 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,710 INFO [StoreOpener-99b5060b61ba875b95902ed3a94eda94-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,711 INFO [StoreOpener-99b5060b61ba875b95902ed3a94eda94-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99b5060b61ba875b95902ed3a94eda94 columnFamilyName cf 2024-12-11T06:14:44,713 DEBUG [StoreOpener-99b5060b61ba875b95902ed3a94eda94-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:44,714 INFO [StoreOpener-99b5060b61ba875b95902ed3a94eda94-1 {}] regionserver.HStore(327): Store=99b5060b61ba875b95902ed3a94eda94/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:14:44,715 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,716 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,716 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,717 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,717 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,718 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 2024-12-11T06:14:44,718 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => f89cd9328508cc3f2890bb3484b5a573, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:14:44,719 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. service=AccessControlService 2024-12-11T06:14:44,719 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:14:44,719 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,719 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:44,719 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:14:44,719 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:44,719 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:44,721 INFO [StoreOpener-f89cd9328508cc3f2890bb3484b5a573-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:44,724 INFO [StoreOpener-f89cd9328508cc3f2890bb3484b5a573-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f89cd9328508cc3f2890bb3484b5a573 columnFamilyName cf 2024-12-11T06:14:44,726 DEBUG [StoreOpener-f89cd9328508cc3f2890bb3484b5a573-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:44,726 INFO [StoreOpener-f89cd9328508cc3f2890bb3484b5a573-1 {}] regionserver.HStore(327): Store=f89cd9328508cc3f2890bb3484b5a573/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:14:44,727 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:44,728 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:44,728 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:44,728 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:44,728 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:44,730 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:44,741 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:14:44,741 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:14:44,741 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] 
regionserver.HRegion(1114): Opened f89cd9328508cc3f2890bb3484b5a573; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68871638, jitterRate=0.02626737952232361}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:14:44,741 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened 99b5060b61ba875b95902ed3a94eda94; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64191835, jitterRate=-0.043467119336128235}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:14:44,741 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,741 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:44,742 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for f89cd9328508cc3f2890bb3484b5a573: Running coprocessor pre-open hook at 1733897684720Writing region info on filesystem at 1733897684720Initializing all the Stores at 1733897684721 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897684721Cleaning up temporary data from old regions at 1733897684728 (+7 ms)Running coprocessor post-open hooks at 1733897684741 (+13 ms)Region opened successfully at 1733897684742 (+1 ms) 2024-12-11T06:14:44,742 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for 99b5060b61ba875b95902ed3a94eda94: Running coprocessor pre-open hook at 1733897684705Writing region info on filesystem at 1733897684705Initializing all the Stores at 1733897684706 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897684706Cleaning up temporary data from old regions at 1733897684717 (+11 ms)Running coprocessor post-open hooks at 1733897684741 (+24 ms)Region opened successfully at 1733897684742 (+1 ms) 2024-12-11T06:14:44,743 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94., pid=136, masterSystemTime=1733897684700 2024-12-11T06:14:44,744 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks 
for testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573., pid=135, masterSystemTime=1733897684698 2024-12-11T06:14:44,745 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:14:44,745 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:14:44,746 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=99b5060b61ba875b95902ed3a94eda94, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:14:44,748 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 2024-12-11T06:14:44,748 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 2024-12-11T06:14:44,749 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=f89cd9328508cc3f2890bb3484b5a573, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:14:44,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 99b5060b61ba875b95902ed3a94eda94, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:14:44,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure f89cd9328508cc3f2890bb3484b5a573, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:14:44,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=133 2024-12-11T06:14:44,755 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=134 2024-12-11T06:14:44,755 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure f89cd9328508cc3f2890bb3484b5a573, server=b66397bb9319,44539,1733897428597 in 208 msec 2024-12-11T06:14:44,756 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 99b5060b61ba875b95902ed3a94eda94, server=b66397bb9319,42579,1733897428663 in 204 msec 2024-12-11T06:14:44,756 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=99b5060b61ba875b95902ed3a94eda94, ASSIGN in 368 msec 2024-12-11T06:14:44,757 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-12-11T06:14:44,757 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f89cd9328508cc3f2890bb3484b5a573, ASSIGN in 369 msec 2024-12-11T06:14:44,758 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:14:44,758 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897684758"}]},"ts":"1733897684758"} 2024-12-11T06:14:44,759 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-11T06:14:44,760 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:14:44,760 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-11T06:14:44,766 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-11T06:14:44,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:44,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:44,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:44,776 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:44,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:44,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:44,779 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 
2024-12-11T06:14:44,779 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:14:44,779 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:14:44,779 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:14:44,781 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 486 msec 2024-12-11T06:14:44,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-11T06:14:44,927 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T06:14:44,928 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T06:14:44,932 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:44,932 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 
2024-12-11T06:14:44,932 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:14:44,934 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T06:14:44,941 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T06:14:44,947 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T06:14:44,949 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T06:14:44,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897684949 (current time:1733897684949). 2024-12-11T06:14:44,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:14:44,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-11T06:14:44,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:14:44,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3585e9a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:44,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:14:44,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:14:44,952 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:14:44,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:14:44,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:14:44,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ad96dc4, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:44,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:14:44,952 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:14:44,953 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:44,954 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57208, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:14:44,954 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68973566, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:44,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:14:44,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:14:44,955 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:14:44,956 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51802, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:14:44,958 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:14:44,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:14:44,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:44,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:44,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@708818, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:44,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:14:44,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:14:44,962 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:14:44,963 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:14:44,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:14:44,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:14:44,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2be562f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:44,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:14:44,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:14:44,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:44,965 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57222, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:14:44,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42ebb075, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:44,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:14:44,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:14:44,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:14:44,968 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51810, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:14:44,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:14:44,973 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:14:44,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:14:44,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:44,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:44,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-11T06:14:44,974 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:14:44,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-11T06:14:44,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T06:14:44,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-11T06:14:44,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-11T06:14:44,979 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:14:44,980 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:14:44,983 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:14:44,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742179_1355 (size=215) 2024-12-11T06:14:44,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742179_1355 (size=215) 2024-12-11T06:14:44,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742179_1355 (size=215) 2024-12-11T06:14:44,995 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:14:44,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99b5060b61ba875b95902ed3a94eda94}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f89cd9328508cc3f2890bb3484b5a573}] 2024-12-11T06:14:44,996 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:44,996 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:45,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-11T06:14:45,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-11T06:14:45,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for f89cd9328508cc3f2890bb3484b5a573: 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 99b5060b61ba875b95902ed3a94eda94: 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:14:45,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:14:45,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742180_1356 (size=86) 2024-12-11T06:14:45,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742180_1356 (size=86) 2024-12-11T06:14:45,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742180_1356 (size=86) 2024-12-11T06:14:45,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 2024-12-11T06:14:45,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-11T06:14:45,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-11T06:14:45,164 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:45,164 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:45,167 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f89cd9328508cc3f2890bb3484b5a573 in 170 msec 2024-12-11T06:14:45,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742181_1357 (size=86) 2024-12-11T06:14:45,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742181_1357 (size=86) 2024-12-11T06:14:45,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742181_1357 (size=86) 2024-12-11T06:14:45,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 
2024-12-11T06:14:45,174 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-11T06:14:45,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-11T06:14:45,175 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:45,175 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:45,178 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=138, resume processing ppid=137 2024-12-11T06:14:45,178 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:14:45,178 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 99b5060b61ba875b95902ed3a94eda94 in 181 msec 2024-12-11T06:14:45,179 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:14:45,180 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:14:45,180 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:14:45,181 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:45,181 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T06:14:45,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742182_1358 (size=78) 2024-12-11T06:14:45,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742182_1358 (size=78) 2024-12-11T06:14:45,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742182_1358 (size=78) 2024-12-11T06:14:45,189 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:14:45,189 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,189 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742183_1359 (size=713) 2024-12-11T06:14:45,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742183_1359 (size=713) 2024-12-11T06:14:45,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742183_1359 (size=713) 2024-12-11T06:14:45,205 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:14:45,210 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:14:45,211 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,212 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:14:45,212 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-11T06:14:45,214 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 236 msec 2024-12-11T06:14:45,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-11T06:14:45,297 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T06:14:45,306 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:14:45,309 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44539 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:14:45,311 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T06:14:45,315 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,315 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 
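The two "writing data to region ... with WAL disabled" warnings above are what a region server emits when a client writes with write-ahead logging turned off. A minimal client-side sketch that would trigger the same warning, assuming the cluster configuration is on the classpath; the row key and value are made up, while the cf:q column does appear in this test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
      Put put = new Put(Bytes.toBytes("row-0"));  // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);     // skips the WAL, producing the warning above
      table.put(put);
    }
  }
}
```

Skipping the WAL trades crash durability for write throughput, which is exactly the risk the log message spells out.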
2024-12-11T06:14:45,315 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:14:45,317 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T06:14:45,327 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T06:14:45,334 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-11T06:14:45,337 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T06:14:45,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897685337 (current time:1733897685337). 2024-12-11T06:14:45,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:14:45,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-11T06:14:45,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:14:45,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e25cd1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:45,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:14:45,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:14:45,339 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:14:45,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:14:45,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:14:45,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e662bfa, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:45,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:14:45,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:14:45,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:45,341 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57238, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:14:45,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f6e9da6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:45,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:14:45,343 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:14:45,344 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:14:45,345 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51824, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:14:45,347 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
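The cluster-id fetch and meta-region location lookup recorded above are the bootstrap steps that ConnectionFactory.createConnection performs before any data RPC is sent. A short sketch of the equivalent client-side call path, ending with one way to list the table's regions (the test harness itself scans hbase:meta directly for its "Found 2 regions for table" check); it assumes hbase-site.xml for this cluster is on the classpath:

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListTableRegions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Fetches region info for the table; the connection handles cluster-id and
      // meta location discovery internally, as the DEBUG entries above show.
      List<RegionInfo> regions = admin.getRegions(
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
      for (RegionInfo region : regions) {
        System.out.println(region.getRegionNameAsString());
      }
    }
  }
}
```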
2024-12-11T06:14:45,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:14:45,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:45,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:45,348 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:14:45,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f6ef656, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:45,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:14:45,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:14:45,357 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:14:45,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:14:45,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:14:45,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54ed5ff0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:45,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:14:45,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:14:45,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:45,360 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57250, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:14:45,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35ad7007, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:45,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:14:45,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:14:45,363 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:14:45,363 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51826, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:14:45,366 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:14:45,368 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:14:45,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:14:45,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:45,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:45,368 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:14:45,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-11T06:14:45,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
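At this point the request has been validated: the snapshot description carries a creation time, TTL, version, and owner, and the table ACL (jenkins: RWXCA) has been copied into it. The request itself corresponds to a blocking Admin.snapshot call with a FLUSH-type snapshot; a minimal sketch using the names from this log, assuming the cluster configuration is on the classpath:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure reaches SUCCESS,
      // which the client polls for via "Checking to see if procedure is done".
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          SnapshotType.FLUSH);
    }
  }
}
```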
2024-12-11T06:14:45,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-11T06:14:45,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-11T06:14:45,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T06:14:45,372 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:14:45,374 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:14:45,376 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:14:45,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742184_1360 (size=210) 2024-12-11T06:14:45,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742184_1360 (size=210) 2024-12-11T06:14:45,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742184_1360 (size=210) 2024-12-11T06:14:45,391 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:14:45,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99b5060b61ba875b95902ed3a94eda94}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f89cd9328508cc3f2890bb3484b5a573}] 2024-12-11T06:14:45,392 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:45,393 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:45,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T06:14:45,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-11T06:14:45,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-11T06:14:45,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 2024-12-11T06:14:45,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:14:45,545 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing f89cd9328508cc3f2890bb3484b5a573 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-11T06:14:45,545 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 99b5060b61ba875b95902ed3a94eda94 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-11T06:14:45,566 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211ac6d88e3981a4aba95fa8c6a2a5a09de_f89cd9328508cc3f2890bb3484b5a573 is 71, key is 1b4d3a6ec98940be694403c9725c6d10/cf:q/1733897685309/Put/seqid=0 2024-12-11T06:14:45,567 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115100a90eb7204a8a9402d16f4d4e52fc_99b5060b61ba875b95902ed3a94eda94 is 71, key is 03b244ee62f75f75f5e490b393bcbd49/cf:q/1733897685306/Put/seqid=0 2024-12-11T06:14:45,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742185_1361 (size=8102) 2024-12-11T06:14:45,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742185_1361 (size=8102) 2024-12-11T06:14:45,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742185_1361 (size=8102) 2024-12-11T06:14:45,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:45,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742186_1362 (size=5172) 2024-12-11T06:14:45,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742186_1362 (size=5172) 2024-12-11T06:14:45,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742186_1362 (size=5172) 2024-12-11T06:14:45,582 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:45,585 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211ac6d88e3981a4aba95fa8c6a2a5a09de_f89cd9328508cc3f2890bb3484b5a573 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241211ac6d88e3981a4aba95fa8c6a2a5a09de_f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:45,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/.tmp/cf/10faded84e0f4b279ee342198121ce65, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=f89cd9328508cc3f2890bb3484b5a573] 2024-12-11T06:14:45,587 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115100a90eb7204a8a9402d16f4d4e52fc_99b5060b61ba875b95902ed3a94eda94 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202412115100a90eb7204a8a9402d16f4d4e52fc_99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:45,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/.tmp/cf/10faded84e0f4b279ee342198121ce65 is 224, key is 1ae59761a4bf61e3d99ffbbb27a6c8be9/cf:q/1733897685309/Put/seqid=0 2024-12-11T06:14:45,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/.tmp/cf/e2c547a0df3b4abdba800125927ed443, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=99b5060b61ba875b95902ed3a94eda94] 2024-12-11T06:14:45,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/.tmp/cf/e2c547a0df3b4abdba800125927ed443 is 224, key is 08f664c6b4cd28beb598f10d16a7cf362/cf:q/1733897685306/Put/seqid=0 2024-12-11T06:14:45,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742187_1363 (size=6198) 2024-12-11T06:14:45,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742187_1363 (size=6198) 2024-12-11T06:14:45,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742187_1363 (size=6198) 2024-12-11T06:14:45,594 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/.tmp/cf/e2c547a0df3b4abdba800125927ed443 2024-12-11T06:14:45,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/.tmp/cf/e2c547a0df3b4abdba800125927ed443 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/cf/e2c547a0df3b4abdba800125927ed443 2024-12-11T06:14:45,604 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/cf/e2c547a0df3b4abdba800125927ed443, entries=4, sequenceid=6, filesize=6.1 K 2024-12-11T06:14:45,605 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 99b5060b61ba875b95902ed3a94eda94 in 60ms, sequenceid=6, compaction requested=false 2024-12-11T06:14:45,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-11T06:14:45,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] 
regionserver.HRegion(2603): Flush status journal for 99b5060b61ba875b95902ed3a94eda94: 2024-12-11T06:14:45,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T06:14:45,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:14:45,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/cf/e2c547a0df3b4abdba800125927ed443] hfiles 2024-12-11T06:14:45,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/cf/e2c547a0df3b4abdba800125927ed443 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742188_1364 (size=15499) 2024-12-11T06:14:45,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742188_1364 (size=15499) 2024-12-11T06:14:45,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742188_1364 (size=15499) 2024-12-11T06:14:45,618 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/.tmp/cf/10faded84e0f4b279ee342198121ce65 2024-12-11T06:14:45,624 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/.tmp/cf/10faded84e0f4b279ee342198121ce65 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/cf/10faded84e0f4b279ee342198121ce65 2024-12-11T06:14:45,629 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/cf/10faded84e0f4b279ee342198121ce65, entries=46, sequenceid=6, filesize=15.1 K 2024-12-11T06:14:45,630 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for f89cd9328508cc3f2890bb3484b5a573 in 85ms, sequenceid=6, compaction requested=false 2024-12-11T06:14:45,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for f89cd9328508cc3f2890bb3484b5a573: 2024-12-11T06:14:45,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-11T06:14:45,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,630 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:14:45,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/cf/10faded84e0f4b279ee342198121ce65] hfiles 2024-12-11T06:14:45,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/cf/10faded84e0f4b279ee342198121ce65 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742189_1365 (size=125) 2024-12-11T06:14:45,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742189_1365 (size=125) 2024-12-11T06:14:45,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742189_1365 (size=125) 2024-12-11T06:14:45,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 
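A FLUSH-type snapshot first flushes each region's memstore, which is where the "Flushing ...", "Mob store is flushed", and "Finished flush of dataSize ..." entries above come from; only the resulting HFiles are then referenced in the snapshot manifest. The same flush can also be requested on its own through the Admin API; a minimal sketch, assuming the cluster configuration is on the classpath:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flushes every region of the table; memstore contents are written out as new HFiles.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}
```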
2024-12-11T06:14:45,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-11T06:14:45,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-11T06:14:45,655 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:45,655 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:45,658 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 99b5060b61ba875b95902ed3a94eda94 in 266 msec 2024-12-11T06:14:45,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742190_1366 (size=125) 2024-12-11T06:14:45,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742190_1366 (size=125) 2024-12-11T06:14:45,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742190_1366 (size=125) 2024-12-11T06:14:45,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 
2024-12-11T06:14:45,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-11T06:14:45,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-11T06:14:45,673 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:45,674 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:45,682 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=142, resume processing ppid=140 2024-12-11T06:14:45,682 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f89cd9328508cc3f2890bb3484b5a573 in 288 msec 2024-12-11T06:14:45,682 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:14:45,682 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:14:45,684 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
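Both region subprocedures (pid=141 and pid=142) have finished here; the parent procedure still snapshots the MOB region, consolidates and verifies the manifest, and moves it out of .hbase-snapshot/.tmp, as the entries below show. Once it reports SUCCESS, one way for a client to confirm the snapshot is visible is to list snapshots; a minimal sketch, assuming the cluster configuration is on the classpath:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Prints every completed snapshot known to the master.
      for (SnapshotDescription snapshot : admin.listSnapshots()) {
        System.out.println(snapshot.getName() + " on " + snapshot.getTableName());
      }
    }
  }
}
```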
2024-12-11T06:14:45,684 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:14:45,684 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:45,685 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241211ac6d88e3981a4aba95fa8c6a2a5a09de_f89cd9328508cc3f2890bb3484b5a573, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202412115100a90eb7204a8a9402d16f4d4e52fc_99b5060b61ba875b95902ed3a94eda94] hfiles 2024-12-11T06:14:45,685 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241211ac6d88e3981a4aba95fa8c6a2a5a09de_f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:14:45,686 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202412115100a90eb7204a8a9402d16f4d4e52fc_99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:14:45,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T06:14:45,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742191_1367 (size=309) 2024-12-11T06:14:45,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742191_1367 (size=309) 2024-12-11T06:14:45,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742191_1367 (size=309) 2024-12-11T06:14:45,694 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:14:45,694 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,695 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33445 is added to blk_1073742192_1368 (size=1023) 2024-12-11T06:14:45,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742192_1368 (size=1023) 2024-12-11T06:14:45,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742192_1368 (size=1023) 2024-12-11T06:14:45,718 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:14:45,724 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:14:45,724 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:45,726 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:14:45,726 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-11T06:14:45,727 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 357 msec 2024-12-11T06:14:45,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-11T06:14:45,997 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T06:14:45,999 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T06:14:45,999 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T06:14:45,999 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T06:14:46,001 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection 
from 172.17.0.2:48178, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T06:14:46,001 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51842, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T06:14:46,001 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T06:14:46,003 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:14:46,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:14:46,005 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:14:46,005 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:46,005 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-11T06:14:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T06:14:46,006 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:14:46,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742193_1369 (size=399) 2024-12-11T06:14:46,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742193_1369 (size=399) 2024-12-11T06:14:46,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742193_1369 (size=399) 2024-12-11T06:14:46,015 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2c89f9f2b59ef9af0984314b2fe91cd2, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:14:46,015 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c8af3a894e0aa0c3b4b6571d7cab3112, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:14:46,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742194_1370 (size=85) 2024-12-11T06:14:46,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742194_1370 (size=85) 2024-12-11T06:14:46,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742194_1370 (size=85) 2024-12-11T06:14:46,023 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:14:46,023 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 2c89f9f2b59ef9af0984314b2fe91cd2, disabling compactions & flushes 2024-12-11T06:14:46,023 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. 2024-12-11T06:14:46,023 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. 2024-12-11T06:14:46,023 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. after waiting 0 ms 2024-12-11T06:14:46,023 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. 
2024-12-11T06:14:46,023 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. 2024-12-11T06:14:46,023 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2c89f9f2b59ef9af0984314b2fe91cd2: Waiting for close lock at 1733897686023Disabling compacts and flushes for region at 1733897686023Disabling writes for close at 1733897686023Writing region close event to WAL at 1733897686023Closed at 1733897686023 2024-12-11T06:14:46,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742195_1371 (size=85) 2024-12-11T06:14:46,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742195_1371 (size=85) 2024-12-11T06:14:46,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742195_1371 (size=85) 2024-12-11T06:14:46,030 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:14:46,030 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing c8af3a894e0aa0c3b4b6571d7cab3112, disabling compactions & flushes 2024-12-11T06:14:46,030 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 2024-12-11T06:14:46,030 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 2024-12-11T06:14:46,030 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. after waiting 0 ms 2024-12-11T06:14:46,030 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 2024-12-11T06:14:46,030 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 
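The create request logged above (master.HMaster$4(2454): create 'testtb-testExportFileSystemStateWithMergeRegion-1' with a single 'cf' family, VERSIONS => '1', and regions split at key '2') is the kind of call normally issued through the HBase Admin API. A minimal client-side sketch, assuming a reachable cluster configuration on the classpath; the class name and structure are illustrative and not taken from the test source:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMergeSourceTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml / ZK quorum
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
          // Single column family 'cf' with one version, mirroring the descriptor in the log.
          TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build());
          // One split key ('2') yields the two regions seen above: [''..'2') and ['2'..'').
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("2") };
          admin.createTable(td.build(), splitKeys);          // blocks until the create procedure completes
        }
      }
    }

A synchronous createTable like this would also explain the repeated "Checking to see if procedure is done pid=143" polling interleaved with the procedure log entries.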
2024-12-11T06:14:46,030 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for c8af3a894e0aa0c3b4b6571d7cab3112: Waiting for close lock at 1733897686030Disabling compacts and flushes for region at 1733897686030Disabling writes for close at 1733897686030Writing region close event to WAL at 1733897686030Closed at 1733897686030 2024-12-11T06:14:46,031 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:14:46,031 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733897686031"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897686031"}]},"ts":"1733897686031"} 2024-12-11T06:14:46,031 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733897686031"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897686031"}]},"ts":"1733897686031"} 2024-12-11T06:14:46,033 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T06:14:46,034 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:14:46,034 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897686034"}]},"ts":"1733897686034"} 2024-12-11T06:14:46,036 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-11T06:14:46,036 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:14:46,037 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:14:46,037 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:14:46,037 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:14:46,037 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:14:46,037 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:14:46,037 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:14:46,037 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:14:46,037 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:14:46,037 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:14:46,037 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:14:46,037 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c89f9f2b59ef9af0984314b2fe91cd2, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c8af3a894e0aa0c3b4b6571d7cab3112, ASSIGN}] 2024-12-11T06:14:46,038 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c8af3a894e0aa0c3b4b6571d7cab3112, ASSIGN 2024-12-11T06:14:46,038 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c89f9f2b59ef9af0984314b2fe91cd2, ASSIGN 2024-12-11T06:14:46,039 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c8af3a894e0aa0c3b4b6571d7cab3112, ASSIGN; state=OFFLINE, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:14:46,039 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c89f9f2b59ef9af0984314b2fe91cd2, ASSIGN; state=OFFLINE, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:14:46,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T06:14:46,189 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T06:14:46,190 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=2c89f9f2b59ef9af0984314b2fe91cd2, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:14:46,190 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=c8af3a894e0aa0c3b4b6571d7cab3112, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:14:46,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c8af3a894e0aa0c3b4b6571d7cab3112, ASSIGN because future has completed 2024-12-11T06:14:46,192 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8af3a894e0aa0c3b4b6571d7cab3112, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:14:46,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c89f9f2b59ef9af0984314b2fe91cd2, ASSIGN because future has completed 2024-12-11T06:14:46,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2c89f9f2b59ef9af0984314b2fe91cd2, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:14:46,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T06:14:46,347 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 2024-12-11T06:14:46,347 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => c8af3a894e0aa0c3b4b6571d7cab3112, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112.', STARTKEY => '2', ENDKEY => ''} 2024-12-11T06:14:46,347 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. 2024-12-11T06:14:46,347 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 
service=AccessControlService 2024-12-11T06:14:46,347 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => 2c89f9f2b59ef9af0984314b2fe91cd2, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2.', STARTKEY => '', ENDKEY => '2'} 2024-12-11T06:14:46,348 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:14:46,348 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. service=AccessControlService 2024-12-11T06:14:46,348 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,348 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:14:46,348 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:14:46,348 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,348 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,348 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:14:46,348 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,348 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for 2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,348 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for 2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,349 INFO [StoreOpener-2c89f9f2b59ef9af0984314b2fe91cd2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,350 INFO [StoreOpener-c8af3a894e0aa0c3b4b6571d7cab3112-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,351 INFO [StoreOpener-2c89f9f2b59ef9af0984314b2fe91cd2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c89f9f2b59ef9af0984314b2fe91cd2 columnFamilyName cf 2024-12-11T06:14:46,351 INFO [StoreOpener-c8af3a894e0aa0c3b4b6571d7cab3112-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8af3a894e0aa0c3b4b6571d7cab3112 columnFamilyName cf 2024-12-11T06:14:46,351 DEBUG [StoreOpener-2c89f9f2b59ef9af0984314b2fe91cd2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:46,351 DEBUG [StoreOpener-c8af3a894e0aa0c3b4b6571d7cab3112-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:46,351 INFO [StoreOpener-2c89f9f2b59ef9af0984314b2fe91cd2-1 {}] regionserver.HStore(327): Store=2c89f9f2b59ef9af0984314b2fe91cd2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:14:46,351 INFO [StoreOpener-c8af3a894e0aa0c3b4b6571d7cab3112-1 {}] regionserver.HStore(327): Store=c8af3a894e0aa0c3b4b6571d7cab3112/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:14:46,352 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for 2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,352 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 
c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,352 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,352 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,353 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,353 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,353 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,353 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,353 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for 2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,353 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for 2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,355 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for 2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,355 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,356 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:14:46,356 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:14:46,357 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, 
pid=146}] regionserver.HRegion(1114): Opened c8af3a894e0aa0c3b4b6571d7cab3112; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65670334, jitterRate=-0.02143576741218567}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:14:46,357 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,357 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened 2c89f9f2b59ef9af0984314b2fe91cd2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69884607, jitterRate=0.041361793875694275}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:14:46,357 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,358 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for c8af3a894e0aa0c3b4b6571d7cab3112: Running coprocessor pre-open hook at 1733897686348Writing region info on filesystem at 1733897686348Initializing all the Stores at 1733897686349 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897686349Cleaning up temporary data from old regions at 1733897686353 (+4 ms)Running coprocessor post-open hooks at 1733897686357 (+4 ms)Region opened successfully at 1733897686358 (+1 ms) 2024-12-11T06:14:46,358 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for 2c89f9f2b59ef9af0984314b2fe91cd2: Running coprocessor pre-open hook at 1733897686348Writing region info on filesystem at 1733897686348Initializing all the Stores at 1733897686349 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897686349Cleaning up temporary data from old regions at 1733897686353 (+4 ms)Running coprocessor post-open hooks at 1733897686357 (+4 ms)Region opened successfully at 1733897686358 (+1 ms) 2024-12-11T06:14:46,358 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112., pid=146, masterSystemTime=1733897686344 2024-12-11T06:14:46,358 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2., pid=147, masterSystemTime=1733897686345 2024-12-11T06:14:46,360 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. 2024-12-11T06:14:46,360 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. 2024-12-11T06:14:46,361 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=2c89f9f2b59ef9af0984314b2fe91cd2, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:14:46,361 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 2024-12-11T06:14:46,361 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 2024-12-11T06:14:46,361 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=c8af3a894e0aa0c3b4b6571d7cab3112, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:14:46,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2c89f9f2b59ef9af0984314b2fe91cd2, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:14:46,363 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure c8af3a894e0aa0c3b4b6571d7cab3112, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:14:46,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=144 2024-12-11T06:14:46,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 2c89f9f2b59ef9af0984314b2fe91cd2, server=b66397bb9319,35939,1733897428441 in 170 msec 2024-12-11T06:14:46,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=145 2024-12-11T06:14:46,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure c8af3a894e0aa0c3b4b6571d7cab3112, server=b66397bb9319,42579,1733897428663 in 172 msec 2024-12-11T06:14:46,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c89f9f2b59ef9af0984314b2fe91cd2, ASSIGN in 328 msec 2024-12-11T06:14:46,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-12-11T06:14:46,368 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c8af3a894e0aa0c3b4b6571d7cab3112, ASSIGN in 329 msec 2024-12-11T06:14:46,369 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:14:46,369 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897686369"}]},"ts":"1733897686369"} 2024-12-11T06:14:46,371 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-11T06:14:46,372 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:14:46,372 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-11T06:14:46,375 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-11T06:14:46,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:46,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:46,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:46,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:14:46,379 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:14:46,379 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:14:46,379 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data 
PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T06:14:46,379 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T06:14:46,379 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:14:46,379 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T06:14:46,380 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:14:46,380 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-11T06:14:46,381 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 376 msec 2024-12-11T06:14:46,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-11T06:14:46,637 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T06:14:46,639 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:14:46,643 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112., hostname=b66397bb9319,42579,1733897428663, seqNum=2] 2024-12-11T06:14:46,645 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-11T06:14:46,658 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 
merge regions [2c89f9f2b59ef9af0984314b2fe91cd2, c8af3a894e0aa0c3b4b6571d7cab3112] 2024-12-11T06:14:46,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[2c89f9f2b59ef9af0984314b2fe91cd2, c8af3a894e0aa0c3b4b6571d7cab3112], force=true 2024-12-11T06:14:46,664 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[2c89f9f2b59ef9af0984314b2fe91cd2, c8af3a894e0aa0c3b4b6571d7cab3112], force=true 2024-12-11T06:14:46,664 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[2c89f9f2b59ef9af0984314b2fe91cd2, c8af3a894e0aa0c3b4b6571d7cab3112], force=true 2024-12-11T06:14:46,664 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[2c89f9f2b59ef9af0984314b2fe91cd2, c8af3a894e0aa0c3b4b6571d7cab3112], force=true 2024-12-11T06:14:46,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T06:14:46,671 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c89f9f2b59ef9af0984314b2fe91cd2, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c8af3a894e0aa0c3b4b6571d7cab3112, UNASSIGN}] 2024-12-11T06:14:46,672 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c89f9f2b59ef9af0984314b2fe91cd2, UNASSIGN 2024-12-11T06:14:46,672 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c8af3a894e0aa0c3b4b6571d7cab3112, UNASSIGN 2024-12-11T06:14:46,673 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=2c89f9f2b59ef9af0984314b2fe91cd2, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:14:46,673 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=c8af3a894e0aa0c3b4b6571d7cab3112, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:14:46,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c89f9f2b59ef9af0984314b2fe91cd2, UNASSIGN because future has completed 2024-12-11T06:14:46,675 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-11T06:14:46,675 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2c89f9f2b59ef9af0984314b2fe91cd2, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:14:46,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c8af3a894e0aa0c3b4b6571d7cab3112, UNASSIGN because future has completed 2024-12-11T06:14:46,676 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-11T06:14:46,676 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure c8af3a894e0aa0c3b4b6571d7cab3112, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:14:46,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T06:14:46,827 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,827 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-11T06:14:46,827 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 2c89f9f2b59ef9af0984314b2fe91cd2, disabling compactions & flushes 2024-12-11T06:14:46,827 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. 2024-12-11T06:14:46,827 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. 2024-12-11T06:14:46,827 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. after waiting 0 ms 2024-12-11T06:14:46,827 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. 
2024-12-11T06:14:46,828 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 2c89f9f2b59ef9af0984314b2fe91cd2 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-11T06:14:46,829 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,829 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-11T06:14:46,829 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing c8af3a894e0aa0c3b4b6571d7cab3112, disabling compactions & flushes 2024-12-11T06:14:46,829 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 2024-12-11T06:14:46,829 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 2024-12-11T06:14:46,830 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. after waiting 0 ms 2024-12-11T06:14:46,830 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 
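The two single-cell, 24 B memstore flushes triggered by these closes correspond to one Put per region written after the table came online (see the row='1' and row='2' region lookups above). A hedged sketch of such writes, under the same cluster assumptions as the previous snippet; the values and class name are illustrative, since the log only records the key structure ("1/cf:" and "2/cf:", i.e. an empty qualifier) and the cell size:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteOneRowPerRegion {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table t = connection.getTable(
                 TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))) {
          byte[] cf = Bytes.toBytes("cf");
          byte[] emptyQualifier = new byte[0];  // flushed HFile keys show family 'cf' with an empty qualifier
          // One row per region: '1' lands in [''..'2'), '2' in ['2'..''), matching the two 24 B flushes.
          t.put(new Put(Bytes.toBytes("1")).addColumn(cf, emptyQualifier, Bytes.toBytes("v1"))); // value is illustrative
          t.put(new Put(Bytes.toBytes("2")).addColumn(cf, emptyQualifier, Bytes.toBytes("v2")));
        }
      }
    }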
2024-12-11T06:14:46,830 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing c8af3a894e0aa0c3b4b6571d7cab3112 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-11T06:14:46,845 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/.tmp/cf/9ba9e84e42ec4b0d8272518a127c4044 is 28, key is 2/cf:/1733897686644/Put/seqid=0 2024-12-11T06:14:46,845 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/.tmp/cf/edc079f830124bce8b051c1df7297442 is 28, key is 1/cf:/1733897686640/Put/seqid=0 2024-12-11T06:14:46,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742197_1373 (size=4945) 2024-12-11T06:14:46,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742196_1372 (size=4945) 2024-12-11T06:14:46,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742197_1373 (size=4945) 2024-12-11T06:14:46,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742197_1373 (size=4945) 2024-12-11T06:14:46,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742196_1372 (size=4945) 2024-12-11T06:14:46,854 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/.tmp/cf/edc079f830124bce8b051c1df7297442 2024-12-11T06:14:46,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742196_1372 (size=4945) 2024-12-11T06:14:46,855 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/.tmp/cf/9ba9e84e42ec4b0d8272518a127c4044 2024-12-11T06:14:46,860 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/.tmp/cf/edc079f830124bce8b051c1df7297442 as 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/cf/edc079f830124bce8b051c1df7297442 2024-12-11T06:14:46,860 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/.tmp/cf/9ba9e84e42ec4b0d8272518a127c4044 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/cf/9ba9e84e42ec4b0d8272518a127c4044 2024-12-11T06:14:46,865 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/cf/9ba9e84e42ec4b0d8272518a127c4044, entries=1, sequenceid=5, filesize=4.8 K 2024-12-11T06:14:46,865 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/cf/edc079f830124bce8b051c1df7297442, entries=1, sequenceid=5, filesize=4.8 K 2024-12-11T06:14:46,866 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 2c89f9f2b59ef9af0984314b2fe91cd2 in 39ms, sequenceid=5, compaction requested=false 2024-12-11T06:14:46,866 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for c8af3a894e0aa0c3b4b6571d7cab3112 in 36ms, sequenceid=5, compaction requested=false 2024-12-11T06:14:46,866 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-11T06:14:46,866 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-11T06:14:46,870 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T06:14:46,870 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:14:46,870 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): 
Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. 2024-12-11T06:14:46,870 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T06:14:46,870 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 2c89f9f2b59ef9af0984314b2fe91cd2: Waiting for close lock at 1733897686827Running coprocessor pre-close hooks at 1733897686827Disabling compacts and flushes for region at 1733897686827Disabling writes for close at 1733897686827Obtaining lock to block concurrent updates at 1733897686828 (+1 ms)Preparing flush snapshotting stores in 2c89f9f2b59ef9af0984314b2fe91cd2 at 1733897686828Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733897686828Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2. at 1733897686828Flushing 2c89f9f2b59ef9af0984314b2fe91cd2/cf: creating writer at 1733897686829 (+1 ms)Flushing 2c89f9f2b59ef9af0984314b2fe91cd2/cf: appending metadata at 1733897686844 (+15 ms)Flushing 2c89f9f2b59ef9af0984314b2fe91cd2/cf: closing flushed file at 1733897686844Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@122f2219: reopening flushed file at 1733897686859 (+15 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 2c89f9f2b59ef9af0984314b2fe91cd2 in 39ms, sequenceid=5, compaction requested=false at 1733897686866 (+7 ms)Writing region close event to WAL at 1733897686867 (+1 ms)Running coprocessor post-close hooks at 1733897686870 (+3 ms)Closed at 1733897686870 2024-12-11T06:14:46,871 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:14:46,871 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 2024-12-11T06:14:46,871 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for c8af3a894e0aa0c3b4b6571d7cab3112: Waiting for close lock at 1733897686829Running coprocessor pre-close hooks at 1733897686829Disabling compacts and flushes for region at 1733897686829Disabling writes for close at 1733897686830 (+1 ms)Obtaining lock to block concurrent updates at 1733897686830Preparing flush snapshotting stores in c8af3a894e0aa0c3b4b6571d7cab3112 at 1733897686830Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733897686830Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112. 
at 1733897686830Flushing c8af3a894e0aa0c3b4b6571d7cab3112/cf: creating writer at 1733897686830Flushing c8af3a894e0aa0c3b4b6571d7cab3112/cf: appending metadata at 1733897686844 (+14 ms)Flushing c8af3a894e0aa0c3b4b6571d7cab3112/cf: closing flushed file at 1733897686844Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b585e12: reopening flushed file at 1733897686860 (+16 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for c8af3a894e0aa0c3b4b6571d7cab3112 in 36ms, sequenceid=5, compaction requested=false at 1733897686866 (+6 ms)Writing region close event to WAL at 1733897686867 (+1 ms)Running coprocessor post-close hooks at 1733897686871 (+4 ms)Closed at 1733897686871 2024-12-11T06:14:46,873 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:14:46,873 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=2c89f9f2b59ef9af0984314b2fe91cd2, regionState=CLOSED 2024-12-11T06:14:46,874 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:14:46,874 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=c8af3a894e0aa0c3b4b6571d7cab3112, regionState=CLOSED 2024-12-11T06:14:46,876 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2c89f9f2b59ef9af0984314b2fe91cd2, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:14:46,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure c8af3a894e0aa0c3b4b6571d7cab3112, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:14:46,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=149 2024-12-11T06:14:46,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 2c89f9f2b59ef9af0984314b2fe91cd2, server=b66397bb9319,35939,1733897428441 in 201 msec 2024-12-11T06:14:46,879 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=150 2024-12-11T06:14:46,879 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure c8af3a894e0aa0c3b4b6571d7cab3112, server=b66397bb9319,42579,1733897428663 in 202 msec 2024-12-11T06:14:46,880 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=2c89f9f2b59ef9af0984314b2fe91cd2, UNASSIGN in 207 msec 2024-12-11T06:14:46,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=150, resume processing ppid=148 2024-12-11T06:14:46,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c8af3a894e0aa0c3b4b6571d7cab3112, UNASSIGN in 208 
msec 2024-12-11T06:14:46,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742198_1374 (size=84) 2024-12-11T06:14:46,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742198_1374 (size=84) 2024-12-11T06:14:46,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742198_1374 (size=84) 2024-12-11T06:14:46,901 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:46,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742199_1375 (size=20) 2024-12-11T06:14:46,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742199_1375 (size=20) 2024-12-11T06:14:46,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742199_1375 (size=20) 2024-12-11T06:14:46,910 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:46,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742200_1376 (size=21) 2024-12-11T06:14:46,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742200_1376 (size=21) 2024-12-11T06:14:46,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742200_1376 (size=21) 2024-12-11T06:14:46,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742201_1377 (size=84) 2024-12-11T06:14:46,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742201_1377 (size=84) 2024-12-11T06:14:46,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742201_1377 (size=84) 2024-12-11T06:14:46,926 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:46,935 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-11T06:14:46,938 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686002.2c89f9f2b59ef9af0984314b2fe91cd2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-11T06:14:46,938 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733897686002.c8af3a894e0aa0c3b4b6571d7cab3112.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-11T06:14:46,938 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-11T06:14:46,942 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=366ea7dc0cdd14943ffc9e04b7ea738e, ASSIGN}] 2024-12-11T06:14:46,943 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=366ea7dc0cdd14943ffc9e04b7ea738e, ASSIGN 2024-12-11T06:14:46,943 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=366ea7dc0cdd14943ffc9e04b7ea738e, ASSIGN; state=MERGED, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:14:46,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T06:14:47,094 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-11T06:14:47,094 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=366ea7dc0cdd14943ffc9e04b7ea738e, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:14:47,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=366ea7dc0cdd14943ffc9e04b7ea738e, ASSIGN because future has completed 2024-12-11T06:14:47,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 366ea7dc0cdd14943ffc9e04b7ea738e, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:14:47,170 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000004/launch_container.sh] 2024-12-11T06:14:47,170 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000004/container_tokens] 2024-12-11T06:14:47,170 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000004/sysfs] 2024-12-11T06:14:47,251 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. 2024-12-11T06:14:47,251 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 366ea7dc0cdd14943ffc9e04b7ea738e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e.', STARTKEY => '', ENDKEY => ''} 2024-12-11T06:14:47,252 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. service=AccessControlService 2024-12-11T06:14:47,252 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
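At this point the merged region 366ea7dc0cdd14943ffc9e04b7ea738e is being opened on b66397bb9319,35939 with STARTKEY => '' and ENDKEY => '', i.e. it now covers the table's whole key range. A quick client-side way to confirm the post-merge layout is to list the table's regions; the sketch below is illustrative (hypothetical class name, generic connection setup), using Admin.getRegions as a plausible check rather than anything the test itself runs.

    import java.util.List;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ListRegionsSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          List<RegionInfo> regions = admin.getRegions(table);
          // After the merge the table should report a single region whose start and end keys
          // are both empty, matching the STARTKEY => '' / ENDKEY => '' seen in the log above.
          for (RegionInfo region : regions) {
            System.out.println(region.getEncodedName()
                + " [" + Bytes.toStringBinary(region.getStartKey())
                + ", " + Bytes.toStringBinary(region.getEndKey()) + ")");
          }
        }
      }
    }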
2024-12-11T06:14:47,252 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,252 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:14:47,252 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,252 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,254 INFO [StoreOpener-366ea7dc0cdd14943ffc9e04b7ea738e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,254 INFO [StoreOpener-366ea7dc0cdd14943ffc9e04b7ea738e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 366ea7dc0cdd14943ffc9e04b7ea738e columnFamilyName cf 2024-12-11T06:14:47,255 DEBUG [StoreOpener-366ea7dc0cdd14943ffc9e04b7ea738e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:14:47,263 DEBUG [StoreOpener-366ea7dc0cdd14943ffc9e04b7ea738e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/cf/9ba9e84e42ec4b0d8272518a127c4044.c8af3a894e0aa0c3b4b6571d7cab3112->hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/cf/9ba9e84e42ec4b0d8272518a127c4044-top 2024-12-11T06:14:47,267 DEBUG [StoreOpener-366ea7dc0cdd14943ffc9e04b7ea738e-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/cf/edc079f830124bce8b051c1df7297442.2c89f9f2b59ef9af0984314b2fe91cd2->hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/cf/edc079f830124bce8b051c1df7297442-top 2024-12-11T06:14:47,268 INFO [StoreOpener-366ea7dc0cdd14943ffc9e04b7ea738e-1 {}] regionserver.HStore(327): Store=366ea7dc0cdd14943ffc9e04b7ea738e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:14:47,268 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,269 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,270 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,270 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,270 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,272 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,291 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 366ea7dc0cdd14943ffc9e04b7ea738e; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70339662, jitterRate=0.04814264178276062}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:14:47,291 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,291 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 366ea7dc0cdd14943ffc9e04b7ea738e: Running coprocessor pre-open hook at 1733897687252Writing region info on filesystem at 1733897687252Initializing all the Stores at 1733897687253 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897687253Cleaning up temporary data from old regions at 1733897687270 (+17 ms)Running coprocessor post-open hooks at 1733897687291 (+21 ms)Region opened successfully at 1733897687291 2024-12-11T06:14:47,292 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e., pid=154, masterSystemTime=1733897687248 2024-12-11T06:14:47,292 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e.,because compaction is disabled. 2024-12-11T06:14:47,294 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. 2024-12-11T06:14:47,294 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. 2024-12-11T06:14:47,295 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=366ea7dc0cdd14943ffc9e04b7ea738e, regionState=OPEN, openSeqNum=9, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:14:47,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T06:14:47,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 366ea7dc0cdd14943ffc9e04b7ea738e, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:14:47,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-11T06:14:47,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 366ea7dc0cdd14943ffc9e04b7ea738e, server=b66397bb9319,35939,1733897428441 in 202 msec 2024-12-11T06:14:47,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-11T06:14:47,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=366ea7dc0cdd14943ffc9e04b7ea738e, ASSIGN in 358 msec 2024-12-11T06:14:47,304 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[2c89f9f2b59ef9af0984314b2fe91cd2, c8af3a894e0aa0c3b4b6571d7cab3112], force=true in 642 msec 2024-12-11T06:14:47,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-11T06:14:47,807 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T06:14:47,807 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-11T06:14:47,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897687807 (current time:1733897687807). 2024-12-11T06:14:47,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:14:47,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-11T06:14:47,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:14:47,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56b13e54, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:47,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:14:47,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:14:47,810 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:14:47,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:14:47,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:14:47,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@762275b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:47,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:14:47,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:14:47,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:47,811 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57260, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:14:47,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a71441a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:47,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:14:47,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:14:47,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:14:47,814 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51848, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:14:47,815 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 2024-12-11T06:14:47,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:14:47,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:47,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:47,816 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:14:47,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50c3c9df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:47,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:14:47,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:14:47,817 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:14:47,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:14:47,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:14:47,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18adf1c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:47,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:14:47,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:14:47,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:47,818 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:14:47,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49b0b85c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:14:47,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:14:47,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:14:47,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:14:47,821 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51864, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
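The call stacks above come from writeAclToSnapshotDescription: before executing the snapshot, the master opens a short-lived connection, locates hbase:acl and reads the table's permissions so they can be embedded in the snapshot description (the entry it finds, "[jenkins: RWXCA]", appears a few entries below). Such an ACL entry is normally created through AccessControlClient; the sketch below is a hedged illustration assuming the AccessController coprocessor is enabled as in this test, with a hypothetical class name and a table-wide grant (null family/qualifier).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantAclSketch {
      public static void main(String[] args) throws Throwable {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Grant READ/WRITE/EXEC/CREATE/ADMIN on the whole table (family and qualifier null),
          // which is stored in hbase:acl and read back as "jenkins: RWXCA".
          AccessControlClient.grant(conn, table, "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }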
2024-12-11T06:14:47,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:14:47,823 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 2024-12-11T06:14:47,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:14:47,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:47,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:14:47,824 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
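The request being validated here, { ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, is executed as a SnapshotProcedure (pid=155 below). Client-side it is a single blocking Admin call; the sketch below is a minimal illustration (hypothetical class name, generic connection setup), using the two-argument Admin.snapshot, which takes a FLUSH-type snapshot of an enabled table as in the logged request.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // For an enabled table this takes a FLUSH-type snapshot, matching "type=FLUSH ttl=0"
          // in the request above; the call blocks until the master's SnapshotProcedure completes.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"));
        }
      }
    }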
2024-12-11T06:14:47,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-11T06:14:47,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T06:14:47,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-11T06:14:47,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-11T06:14:47,827 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:14:47,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-11T06:14:47,828 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:14:47,830 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:14:47,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742202_1378 (size=216) 2024-12-11T06:14:47,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742202_1378 (size=216) 2024-12-11T06:14:47,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742202_1378 (size=216) 2024-12-11T06:14:47,837 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:14:47,837 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
366ea7dc0cdd14943ffc9e04b7ea738e}] 2024-12-11T06:14:47,838 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-11T06:14:47,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-11T06:14:47,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. 2024-12-11T06:14:47,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 366ea7dc0cdd14943ffc9e04b7ea738e: 2024-12-11T06:14:47,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-11T06:14:47,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:14:47,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:14:47,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/cf/9ba9e84e42ec4b0d8272518a127c4044.c8af3a894e0aa0c3b4b6571d7cab3112->hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/cf/9ba9e84e42ec4b0d8272518a127c4044-top, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/cf/edc079f830124bce8b051c1df7297442.2c89f9f2b59ef9af0984314b2fe91cd2->hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/cf/edc079f830124bce8b051c1df7297442-top] hfiles 2024-12-11T06:14:47,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/cf/9ba9e84e42ec4b0d8272518a127c4044.c8af3a894e0aa0c3b4b6571d7cab3112 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:14:47,991 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/cf/edc079f830124bce8b051c1df7297442.2c89f9f2b59ef9af0984314b2fe91cd2 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:14:47,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742203_1379 (size=269) 2024-12-11T06:14:47,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742203_1379 (size=269) 2024-12-11T06:14:47,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742203_1379 (size=269) 2024-12-11T06:14:47,998 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. 2024-12-11T06:14:47,998 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-11T06:14:47,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-11T06:14:47,999 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:47,999 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:14:48,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-11T06:14:48,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 366ea7dc0cdd14943ffc9e04b7ea738e in 163 msec 2024-12-11T06:14:48,001 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:14:48,002 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 
} execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:14:48,003 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:14:48,003 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:14:48,003 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:14:48,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742204_1380 (size=670) 2024-12-11T06:14:48,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742204_1380 (size=670) 2024-12-11T06:14:48,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742204_1380 (size=670) 2024-12-11T06:14:48,013 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:14:48,018 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:14:48,018 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:14:48,019 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:14:48,020 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-11T06:14:48,021 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 
table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 195 msec 2024-12-11T06:14:48,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:14:48,055 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-11T06:14:48,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:14:48,056 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-11T06:14:48,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-11T06:14:48,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-11T06:14:48,147 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T06:14:48,147 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897688147 2024-12-11T06:14:48,147 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:42173, tgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897688147, rawTgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897688147, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:14:48,183 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:14:48,183 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897688147, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897688147/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:14:48,185 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
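With the snapshot complete, TestExportSnapshot hands a target HDFS URI to the ExportSnapshot tool, which verifies the snapshot, copies its manifest, and then ships the referenced hfiles with a MapReduce job (the jar-resolution entries that follow come from that job setup). The sketch below shows one plausible programmatic invocation via ToolRunner, equivalent to the documented CLI form (hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot ... -copy-to ...); the class name is hypothetical and the destination is simply the per-test export directory from the log rather than a real remote cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Snapshot name and -copy-to destination mirror the log above; in practice the
        // destination would be another cluster's HBase root directory.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to",
            "hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897688147" });
        System.exit(rc);
      }
    }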
2024-12-11T06:14:48,190 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897688147/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:14:48,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742206_1382 (size=670) 2024-12-11T06:14:48,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742205_1381 (size=216) 2024-12-11T06:14:48,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742205_1381 (size=216) 2024-12-11T06:14:48,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742206_1382 (size=670) 2024-12-11T06:14:48,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742206_1382 (size=670) 2024-12-11T06:14:48,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742205_1381 (size=216) 2024-12-11T06:14:48,217 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:48,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:48,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:48,235 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0006_000001 (auth:SIMPLE) from 127.0.0.1:45714 2024-12-11T06:14:48,246 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000001/launch_container.sh] 2024-12-11T06:14:48,246 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000001/container_tokens] 2024-12-11T06:14:48,247 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0006/container_1733897436125_0006_01_000001/sysfs] 2024-12-11T06:14:49,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-1553823023647774384.jar 2024-12-11T06:14:49,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:49,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:49,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-18063712287566796922.jar 2024-12-11T06:14:49,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:49,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:49,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:49,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:49,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:49,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:14:49,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T06:14:49,314 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T06:14:49,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T06:14:49,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T06:14:49,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T06:14:49,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T06:14:49,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T06:14:49,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T06:14:49,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T06:14:49,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T06:14:49,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T06:14:49,316 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:49,316 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:49,316 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:14:49,316 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:49,316 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:14:49,317 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:14:49,317 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:14:49,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742207_1383 (size=443172) 2024-12-11T06:14:49,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742207_1383 (size=443172) 2024-12-11T06:14:49,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742207_1383 (size=443172) 2024-12-11T06:14:49,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742208_1384 (size=131440) 2024-12-11T06:14:49,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742208_1384 (size=131440) 2024-12-11T06:14:49,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742208_1384 (size=131440) 2024-12-11T06:14:49,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742209_1385 (size=4188619) 2024-12-11T06:14:49,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742209_1385 (size=4188619) 2024-12-11T06:14:49,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742209_1385 (size=4188619) 2024-12-11T06:14:49,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742210_1386 (size=1323991) 2024-12-11T06:14:49,407 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742210_1386 (size=1323991) 2024-12-11T06:14:49,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742210_1386 (size=1323991) 2024-12-11T06:14:49,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742211_1387 (size=903542) 2024-12-11T06:14:49,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742211_1387 (size=903542) 2024-12-11T06:14:49,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742211_1387 (size=903542) 2024-12-11T06:14:49,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742212_1388 (size=8360360) 2024-12-11T06:14:49,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742212_1388 (size=8360360) 2024-12-11T06:14:49,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742212_1388 (size=8360360) 2024-12-11T06:14:49,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742213_1389 (size=1877034) 2024-12-11T06:14:49,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742213_1389 (size=1877034) 2024-12-11T06:14:49,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742213_1389 (size=1877034) 2024-12-11T06:14:49,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742214_1390 (size=77835) 2024-12-11T06:14:49,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742214_1390 (size=77835) 2024-12-11T06:14:49,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742214_1390 (size=77835) 2024-12-11T06:14:49,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742215_1391 (size=30949) 2024-12-11T06:14:49,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742215_1391 (size=30949) 2024-12-11T06:14:49,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742215_1391 (size=30949) 2024-12-11T06:14:49,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742216_1392 (size=1597213) 2024-12-11T06:14:49,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742216_1392 (size=1597213) 2024-12-11T06:14:49,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742216_1392 (size=1597213) 2024-12-11T06:14:49,504 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742217_1393 (size=4695811) 2024-12-11T06:14:49,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742217_1393 (size=4695811) 2024-12-11T06:14:49,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742217_1393 (size=4695811) 2024-12-11T06:14:49,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742218_1394 (size=232957) 2024-12-11T06:14:49,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742218_1394 (size=232957) 2024-12-11T06:14:49,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742218_1394 (size=232957) 2024-12-11T06:14:49,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742219_1395 (size=127628) 2024-12-11T06:14:49,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742219_1395 (size=127628) 2024-12-11T06:14:49,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742219_1395 (size=127628) 2024-12-11T06:14:49,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742220_1396 (size=20406) 2024-12-11T06:14:49,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742220_1396 (size=20406) 2024-12-11T06:14:49,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742220_1396 (size=20406) 2024-12-11T06:14:49,542 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:14:49,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742221_1397 (size=5175431) 2024-12-11T06:14:49,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742221_1397 (size=5175431) 2024-12-11T06:14:49,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742221_1397 (size=5175431) 2024-12-11T06:14:49,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742222_1398 (size=6426220) 2024-12-11T06:14:49,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742222_1398 (size=6426220) 2024-12-11T06:14:49,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742222_1398 (size=6426220) 2024-12-11T06:14:49,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742223_1399 (size=217634) 2024-12-11T06:14:49,603 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742223_1399 (size=217634) 2024-12-11T06:14:49,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742223_1399 (size=217634) 2024-12-11T06:14:49,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742224_1400 (size=1832290) 2024-12-11T06:14:49,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742224_1400 (size=1832290) 2024-12-11T06:14:49,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742224_1400 (size=1832290) 2024-12-11T06:14:49,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742225_1401 (size=322274) 2024-12-11T06:14:49,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742225_1401 (size=322274) 2024-12-11T06:14:49,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742225_1401 (size=322274) 2024-12-11T06:14:49,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742226_1402 (size=503880) 2024-12-11T06:14:49,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742226_1402 (size=503880) 2024-12-11T06:14:49,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742226_1402 (size=503880) 2024-12-11T06:14:49,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742227_1403 (size=29229) 2024-12-11T06:14:49,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742227_1403 (size=29229) 2024-12-11T06:14:49,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742227_1403 (size=29229) 2024-12-11T06:14:49,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742228_1404 (size=24096) 2024-12-11T06:14:49,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742228_1404 (size=24096) 2024-12-11T06:14:49,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742228_1404 (size=24096) 2024-12-11T06:14:49,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742229_1405 (size=111872) 2024-12-11T06:14:49,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742229_1405 (size=111872) 2024-12-11T06:14:49,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742229_1405 (size=111872) 
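For context, the mapreduce.TableMapReduceUtil(972) DEBUG lines above are emitted while the export job's dependency jars are resolved and staged into the distributed cache (the addStoredBlock entries that follow are those jars landing on the three test datanodes). Below is a minimal, illustrative Java sketch of the client-side call that triggers that resolution; the class and job names are placeholders, not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarSetup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "example-export-job");  // job name is a placeholder
        job.setJarByClass(DependencyJarSetup.class);            // see the JobResourceUploader warning below
        // Resolves the jar that provides each class the job needs (HBase modules, ZooKeeper,
        // metrics, OpenTelemetry, hadoop-common, ...) and ships it via the distributed cache;
        // each resolution is logged at DEBUG as "For class X, using jar Y".
        TableMapReduceUtil.addDependencyJars(job);
      }
    }

Setting the job jar explicitly (setJarByClass or Job#setJar) also avoids the "No job jar file set. User classes may not be found." warning that JobResourceUploader logs a little further down.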
2024-12-11T06:14:49,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742230_1406 (size=45609) 2024-12-11T06:14:49,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742230_1406 (size=45609) 2024-12-11T06:14:49,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742230_1406 (size=45609) 2024-12-11T06:14:49,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742231_1407 (size=136454) 2024-12-11T06:14:49,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742231_1407 (size=136454) 2024-12-11T06:14:49,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742231_1407 (size=136454) 2024-12-11T06:14:49,674 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T06:14:49,676 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-11T06:14:49,677 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-11T06:14:49,678 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-11T06:14:49,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742232_1408 (size=481) 2024-12-11T06:14:49,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742232_1408 (size=481) 2024-12-11T06:14:49,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742232_1408 (size=481) 2024-12-11T06:14:49,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742233_1409 (size=21) 2024-12-11T06:14:49,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742233_1409 (size=21) 2024-12-11T06:14:49,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742233_1409 (size=21) 2024-12-11T06:14:50,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742234_1410 (size=304055) 2024-12-11T06:14:50,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742234_1410 (size=304055) 2024-12-11T06:14:50,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742234_1410 (size=304055) 2024-12-11T06:14:50,139 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T06:14:50,139 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:14:50,237 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0007_000001 (auth:SIMPLE) from 127.0.0.1:45720 2024-12-11T06:14:55,748 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0007_000001 (auth:SIMPLE) from 127.0.0.1:51712 2024-12-11T06:14:56,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742235_1411 (size=349753) 2024-12-11T06:14:56,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742235_1411 (size=349753) 2024-12-11T06:14:56,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742235_1411 (size=349753) 2024-12-11T06:14:56,296 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T06:14:58,027 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0007_000001 (auth:SIMPLE) from 127.0.0.1:44518 2024-12-11T06:14:58,028 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0007_000001 (auth:SIMPLE) from 127.0.0.1:60334 2024-12-11T06:15:02,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742236_1412 (size=4945) 2024-12-11T06:15:02,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742236_1412 (size=4945) 2024-12-11T06:15:02,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742236_1412 (size=4945) 2024-12-11T06:15:02,675 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0007/container_1733897436125_0007_01_000003/launch_container.sh] 2024-12-11T06:15:02,675 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0007/container_1733897436125_0007_01_000003/container_tokens] 2024-12-11T06:15:02,675 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0007/container_1733897436125_0007_01_000003/sysfs] 2024-12-11T06:15:03,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742238_1414 (size=4945) 2024-12-11T06:15:03,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742238_1414 (size=4945) 2024-12-11T06:15:03,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742238_1414 (size=4945) 2024-12-11T06:15:03,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742237_1413 (size=22246) 2024-12-11T06:15:03,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742237_1413 (size=22246) 2024-12-11T06:15:03,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742237_1413 (size=22246) 2024-12-11T06:15:03,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742239_1415 (size=483) 2024-12-11T06:15:03,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742239_1415 (size=483) 2024-12-11T06:15:03,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742239_1415 (size=483) 2024-12-11T06:15:04,009 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0007/container_1733897436125_0007_01_000002/launch_container.sh] 2024-12-11T06:15:04,009 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0007/container_1733897436125_0007_01_000002/container_tokens] 2024-12-11T06:15:04,009 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0007/container_1733897436125_0007_01_000002/sysfs] 2024-12-11T06:15:04,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742240_1416 (size=22246) 2024-12-11T06:15:04,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742240_1416 (size=22246) 2024-12-11T06:15:04,086 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742240_1416 (size=22246) 2024-12-11T06:15:04,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742241_1417 (size=349753) 2024-12-11T06:15:04,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742241_1417 (size=349753) 2024-12-11T06:15:04,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742241_1417 (size=349753) 2024-12-11T06:15:04,179 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0007_000001 (auth:SIMPLE) from 127.0.0.1:55444 2024-12-11T06:15:05,342 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T06:15:05,343 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-11T06:15:05,397 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,397 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T06:15:05,398 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T06:15:05,398 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,399 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-11T06:15:05,399 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-11T06:15:05,399 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897688147/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897688147/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,399 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897688147/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-11T06:15:05,399 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897688147/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-11T06:15:05,418 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,424 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897705424"}]},"ts":"1733897705424"} 2024-12-11T06:15:05,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-11T06:15:05,426 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-11T06:15:05,426 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-11T06:15:05,427 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-11T06:15:05,430 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=366ea7dc0cdd14943ffc9e04b7ea738e, UNASSIGN}] 2024-12-11T06:15:05,431 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=366ea7dc0cdd14943ffc9e04b7ea738e, UNASSIGN 2024-12-11T06:15:05,432 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=366ea7dc0cdd14943ffc9e04b7ea738e, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:15:05,435 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=366ea7dc0cdd14943ffc9e04b7ea738e, UNASSIGN because future has completed 2024-12-11T06:15:05,435 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:15:05,436 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 366ea7dc0cdd14943ffc9e04b7ea738e, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:15:05,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 
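The snapshot.ExportSnapshot lines above (copying the snapshot manifest, "Loading Snapshot ... hfile list", two ~4.8 K splits, then "Finalize the Snapshot Export" and "Export Completed") correspond to one run of the ExportSnapshot tool. A minimal sketch of driving it programmatically is shown below; the destination URI and mapper count are placeholders, and the -snapshot/-copy-to/-mappers options are the ones documented in the HBase reference guide.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExportSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent to the documented CLI form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot <name> -copy-to <hdfs-uri> -mappers <n>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to", "hdfs://backup-cluster:8020/hbase",  // destination is a placeholder
            "-mappers", "2"
        });
        System.exit(rc);
      }
    }

After the export, TestExportSnapshot re-lists both the source and exported .hbase-snapshot directories (.snapshotinfo and data.manifest) to verify the copy, as logged above.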
2024-12-11T06:15:05,588 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:15:05,588 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:15:05,588 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing 366ea7dc0cdd14943ffc9e04b7ea738e, disabling compactions & flushes 2024-12-11T06:15:05,588 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. 2024-12-11T06:15:05,588 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. 2024-12-11T06:15:05,588 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. after waiting 0 ms 2024-12-11T06:15:05,588 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. 2024-12-11T06:15:05,594 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-11T06:15:05,595 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:15:05,595 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e. 
2024-12-11T06:15:05,595 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for 366ea7dc0cdd14943ffc9e04b7ea738e: Waiting for close lock at 1733897705588Running coprocessor pre-close hooks at 1733897705588Disabling compacts and flushes for region at 1733897705588Disabling writes for close at 1733897705588Writing region close event to WAL at 1733897705589 (+1 ms)Running coprocessor post-close hooks at 1733897705595 (+6 ms)Closed at 1733897705595 2024-12-11T06:15:05,597 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed 366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:15:05,598 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=366ea7dc0cdd14943ffc9e04b7ea738e, regionState=CLOSED 2024-12-11T06:15:05,600 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 366ea7dc0cdd14943ffc9e04b7ea738e, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:15:05,605 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-11T06:15:05,605 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure 366ea7dc0cdd14943ffc9e04b7ea738e, server=b66397bb9319,35939,1733897428441 in 166 msec 2024-12-11T06:15:05,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-11T06:15:05,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=366ea7dc0cdd14943ffc9e04b7ea738e, UNASSIGN in 175 msec 2024-12-11T06:15:05,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-12-11T06:15:05,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 181 msec 2024-12-11T06:15:05,612 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897705612"}]},"ts":"1733897705612"} 2024-12-11T06:15:05,614 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-11T06:15:05,615 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-11T06:15:05,617 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 198 msec 2024-12-11T06:15:05,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-11T06:15:05,748 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: 
default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T06:15:05,748 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,751 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,752 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,755 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,757 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112 2024-12-11T06:15:05,757 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:15:05,757 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:15:05,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,760 DEBUG 
[HFileArchiver-13 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/recovered.edits] 2024-12-11T06:15:05,761 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-11T06:15:05,761 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-11T06:15:05,761 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-11T06:15:05,761 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-11T06:15:05,762 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/recovered.edits] 2024-12-11T06:15:05,762 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/recovered.edits] 2024-12-11T06:15:05,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:05,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,763 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:05,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:05,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:05,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-11T06:15:05,769 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:05,769 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:05,769 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:05,769 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:05,774 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/cf/edc079f830124bce8b051c1df7297442 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/cf/edc079f830124bce8b051c1df7297442 2024-12-11T06:15:05,774 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/cf/9ba9e84e42ec4b0d8272518a127c4044.c8af3a894e0aa0c3b4b6571d7cab3112 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/cf/9ba9e84e42ec4b0d8272518a127c4044.c8af3a894e0aa0c3b4b6571d7cab3112 
2024-12-11T06:15:05,774 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/cf/edc079f830124bce8b051c1df7297442.2c89f9f2b59ef9af0984314b2fe91cd2 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/cf/edc079f830124bce8b051c1df7297442.2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:15:05,774 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/cf/9ba9e84e42ec4b0d8272518a127c4044 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/cf/9ba9e84e42ec4b0d8272518a127c4044 2024-12-11T06:15:05,779 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/recovered.edits/8.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2/recovered.edits/8.seqid 2024-12-11T06:15:05,780 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/recovered.edits/12.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e/recovered.edits/12.seqid 2024-12-11T06:15:05,780 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/recovered.edits/8.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112/recovered.edits/8.seqid 2024-12-11T06:15:05,780 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/2c89f9f2b59ef9af0984314b2fe91cd2 2024-12-11T06:15:05,781 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/366ea7dc0cdd14943ffc9e04b7ea738e 2024-12-11T06:15:05,781 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c8af3a894e0aa0c3b4b6571d7cab3112 
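The PermissionStorage and ZKPermissionWatcher entries above show the AccessController coprocessor removing the dropped table's entry from hbase:acl, deleting /hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 in ZooKeeper, and refreshing each region server's permission cache. As an illustrative sketch (the API choice and the table-name regex are assumptions, not taken from this run), table grants can be inspected from a client roughly like this:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ListTableGrants {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Reads the grants stored in hbase:acl for tables matching the regex;
          // after DeleteTableProcedure runs, the dropped table no longer appears here.
          List<UserPermission> grants =
              AccessControlClient.getUserPermissions(conn, "testtb-testExportFileSystemStateWithMergeRegion.*");
          for (UserPermission p : grants) {
            System.out.println(p);
          }
        }
      }
    }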
2024-12-11T06:15:05,781 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-11T06:15:05,788 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,791 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-11T06:15:05,795 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-11T06:15:05,796 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,796 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-11T06:15:05,796 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897705796"}]},"ts":"9223372036854775807"} 2024-12-11T06:15:05,801 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-11T06:15:05,801 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 366ea7dc0cdd14943ffc9e04b7ea738e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T06:15:05,801 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
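The HMaster$13 "disable" and HMaster$5 "delete" requests above, and the DisableTableProcedure (pid=157) / DeleteTableProcedure (pid=161) they spawn, are the master-side halves of an ordinary client disable-then-drop. A minimal sketch of the corresponding Admin calls follows; the configuration details are placeholders, not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropExportTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (!admin.isTableDisabled(table)) {
            admin.disableTable(table);  // master runs DisableTableProcedure, unassigning each region
          }
          admin.deleteTable(table);     // master runs DeleteTableProcedure: archive HFiles, clean hbase:meta, drop ACLs
        }
      }
    }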
2024-12-11T06:15:05,801 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897705801"}]},"ts":"9223372036854775807"} 2024-12-11T06:15:05,804 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-11T06:15:05,805 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,806 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 57 msec 2024-12-11T06:15:05,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-11T06:15:05,877 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:05,877 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-11T06:15:05,878 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:05,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:05,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-11T06:15:05,885 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897705885"}]},"ts":"1733897705885"} 2024-12-11T06:15:05,887 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-11T06:15:05,887 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-11T06:15:05,888 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-11T06:15:05,890 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=99b5060b61ba875b95902ed3a94eda94, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f89cd9328508cc3f2890bb3484b5a573, UNASSIGN}] 2024-12-11T06:15:05,892 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=99b5060b61ba875b95902ed3a94eda94, UNASSIGN 2024-12-11T06:15:05,893 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f89cd9328508cc3f2890bb3484b5a573, UNASSIGN 2024-12-11T06:15:05,894 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=99b5060b61ba875b95902ed3a94eda94, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:15:05,895 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=f89cd9328508cc3f2890bb3484b5a573, regionState=CLOSING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:15:05,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=99b5060b61ba875b95902ed3a94eda94, UNASSIGN because future has completed 2024-12-11T06:15:05,898 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:15:05,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 99b5060b61ba875b95902ed3a94eda94, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:15:05,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f89cd9328508cc3f2890bb3484b5a573, UNASSIGN because future has completed 2024-12-11T06:15:05,899 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:15:05,899 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure f89cd9328508cc3f2890bb3484b5a573, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:15:05,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-11T06:15:06,051 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:15:06,052 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:15:06,052 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing 99b5060b61ba875b95902ed3a94eda94, disabling compactions & flushes 2024-12-11T06:15:06,052 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): 
Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:15:06,052 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:15:06,052 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. after waiting 0 ms 2024-12-11T06:15:06,052 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:15:06,052 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:15:06,053 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:15:06,053 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing f89cd9328508cc3f2890bb3484b5a573, disabling compactions & flushes 2024-12-11T06:15:06,053 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 2024-12-11T06:15:06,053 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 2024-12-11T06:15:06,053 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. after waiting 0 ms 2024-12-11T06:15:06,053 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 
2024-12-11T06:15:06,057 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:15:06,058 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:15:06,058 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:15:06,058 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94. 2024-12-11T06:15:06,058 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:15:06,058 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for 99b5060b61ba875b95902ed3a94eda94: Waiting for close lock at 1733897706052Running coprocessor pre-close hooks at 1733897706052Disabling compacts and flushes for region at 1733897706052Disabling writes for close at 1733897706052Writing region close event to WAL at 1733897706053 (+1 ms)Running coprocessor post-close hooks at 1733897706058 (+5 ms)Closed at 1733897706058 2024-12-11T06:15:06,058 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573. 
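The WALSplitUtil lines above show that closing a region writes a marker file whose name carries the new max sequence id (recovered.edits/9.seqid for newMaxSeqId=9). A hedged sketch, assuming the region directory still exists on HDFS (in this run it is archived moments later), of recovering that number simply by listing the directory and parsing the file name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical check, not part of the test: the marker name itself encodes the
// max sequence id written at close time (e.g. "9.seqid" for newMaxSeqId=9).
public class ReadMaxSeqIdMarker {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Region path taken from the log; it is moved to the archive shortly afterwards.
    Path editsDir = new Path("hdfs://localhost:42173/user/jenkins/test-data/"
        + "71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/"
        + "testtb-testExportFileSystemStateWithMergeRegion/"
        + "99b5060b61ba875b95902ed3a94eda94/recovered.edits");
    FileSystem fs = editsDir.getFileSystem(conf);
    long maxSeqId = -1L;
    for (FileStatus status : fs.listStatus(editsDir)) {
      String name = status.getPath().getName();
      if (name.endsWith(".seqid")) {
        maxSeqId = Math.max(maxSeqId, Long.parseLong(name.substring(0, name.indexOf('.'))));
      }
    }
    System.out.println("max sequence id recorded at close: " + maxSeqId);
  }
}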
2024-12-11T06:15:06,058 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for f89cd9328508cc3f2890bb3484b5a573: Waiting for close lock at 1733897706053Running coprocessor pre-close hooks at 1733897706053Disabling compacts and flushes for region at 1733897706053Disabling writes for close at 1733897706053Writing region close event to WAL at 1733897706054 (+1 ms)Running coprocessor post-close hooks at 1733897706058 (+4 ms)Closed at 1733897706058 2024-12-11T06:15:06,061 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed 99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:15:06,062 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=99b5060b61ba875b95902ed3a94eda94, regionState=CLOSED 2024-12-11T06:15:06,062 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:15:06,063 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=f89cd9328508cc3f2890bb3484b5a573, regionState=CLOSED 2024-12-11T06:15:06,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 99b5060b61ba875b95902ed3a94eda94, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:15:06,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure f89cd9328508cc3f2890bb3484b5a573, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:15:06,068 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=164 2024-12-11T06:15:06,068 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure 99b5060b61ba875b95902ed3a94eda94, server=b66397bb9319,42579,1733897428663 in 168 msec 2024-12-11T06:15:06,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=165 2024-12-11T06:15:06,070 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=99b5060b61ba875b95902ed3a94eda94, UNASSIGN in 178 msec 2024-12-11T06:15:06,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure f89cd9328508cc3f2890bb3484b5a573, server=b66397bb9319,44539,1733897428597 in 169 msec 2024-12-11T06:15:06,072 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=165, resume processing ppid=163 2024-12-11T06:15:06,072 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=f89cd9328508cc3f2890bb3484b5a573, UNASSIGN in 180 msec 2024-12-11T06:15:06,084 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-11T06:15:06,084 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 184 msec 2024-12-11T06:15:06,087 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897706086"}]},"ts":"1733897706086"} 2024-12-11T06:15:06,089 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-11T06:15:06,089 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-11T06:15:06,092 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 212 msec 2024-12-11T06:15:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-11T06:15:06,197 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T06:15:06,197 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,199 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,200 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,202 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, 
quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,206 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T06:15:06,207 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T06:15:06,207 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T06:15:06,207 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-11T06:15:06,207 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:15:06,207 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:15:06,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:06,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:06,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:06,210 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/recovered.edits] 2024-12-11T06:15:06,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:06,211 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/recovered.edits] 2024-12-11T06:15:06,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-11T06:15:06,216 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/cf/e2c547a0df3b4abdba800125927ed443 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/cf/e2c547a0df3b4abdba800125927ed443 2024-12-11T06:15:06,217 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/cf/10faded84e0f4b279ee342198121ce65 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/cf/10faded84e0f4b279ee342198121ce65 2024-12-11T06:15:06,221 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94/recovered.edits/9.seqid 2024-12-11T06:15:06,221 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/recovered.edits/9.seqid to 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573/recovered.edits/9.seqid 2024-12-11T06:15:06,222 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:15:06,222 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithMergeRegion/99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:15:06,222 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-11T06:15:06,222 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-11T06:15:06,223 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-12-11T06:15:06,228 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202412115100a90eb7204a8a9402d16f4d4e52fc_99b5060b61ba875b95902ed3a94eda94 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202412115100a90eb7204a8a9402d16f4d4e52fc_99b5060b61ba875b95902ed3a94eda94 2024-12-11T06:15:06,228 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241211ac6d88e3981a4aba95fa8c6a2a5a09de_f89cd9328508cc3f2890bb3484b5a573 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241211ac6d88e3981a4aba95fa8c6a2a5a09de_f89cd9328508cc3f2890bb3484b5a573 2024-12-11T06:15:06,229 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-11T06:15:06,232 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,236 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 
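The DELETE_TABLE_REMOVE_FROM_META step above ("Deleting some vestigial 2 rows ... from hbase:meta") cleans the table's region rows out of hbase:meta. Purely as an illustration, and not how the procedure works internally, those rows can be inspected from a client by scanning hbase:meta with the table-name prefix (the prefix scan below is an assumption made for this sketch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowsForTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // Region rows in hbase:meta start with "<tableName>," followed by the start key.
      Scan scan = new Scan().setRowPrefixFilter(
          Bytes.toBytes("testtb-testExportFileSystemStateWithMergeRegion,"));
      try (ResultScanner scanner = meta.getScanner(scan)) {
        for (Result row : scanner) {
          System.out.println(Bytes.toStringBinary(row.getRow()));
        }
      }
    }
  }
}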
2024-12-11T06:15:06,239 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-11T06:15:06,245 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,245 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-11T06:15:06,245 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897706245"}]},"ts":"9223372036854775807"} 2024-12-11T06:15:06,245 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897706245"}]},"ts":"9223372036854775807"} 2024-12-11T06:15:06,249 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T06:15:06,249 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 99b5060b61ba875b95902ed3a94eda94, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733897684292.99b5060b61ba875b95902ed3a94eda94.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f89cd9328508cc3f2890bb3484b5a573, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733897684292.f89cd9328508cc3f2890bb3484b5a573.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T06:15:06,249 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
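Deleting the table does not remove the snapshots taken from it; the records that follow show the client deleting emptySnaptb0-testExportFileSystemStateWithMergeRegion, snaptb0-testExportFileSystemStateWithMergeRegion and snaptb0-testExportFileSystemStateWithMergeRegion-1 explicitly. A rough sketch of equivalent Admin calls (the existence check and the contains() filter are illustration-only assumptions, not the test's code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class DropLeftoverSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      System.out.println("table still exists: " + admin.tableExists(table));
      // Snapshots live independently of the table, so they have to be removed explicitly.
      for (SnapshotDescription snapshot : admin.listSnapshots()) {
        if (snapshot.getName().contains("testExportFileSystemStateWithMergeRegion")) {
          admin.deleteSnapshot(snapshot.getName());
        }
      }
    }
  }
}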
2024-12-11T06:15:06,249 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897706249"}]},"ts":"9223372036854775807"} 2024-12-11T06:15:06,252 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-11T06:15:06,253 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 56 msec 2024-12-11T06:15:06,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-11T06:15:06,317 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,317 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-11T06:15:06,330 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-11T06:15:06,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,333 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-11T06:15:06,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:06,337 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-11T06:15:06,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:06,375 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=813 (was 813), OpenFileDescriptor=792 (was 777) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=878 (was 877) - SystemLoadAverage LEAK? 
-, ProcessCount=23 (was 23), AvailableMemoryMB=1079 (was 1300) 2024-12-11T06:15:06,375 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-12-11T06:15:06,413 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=813, OpenFileDescriptor=792, MaxFileDescriptor=1048576, SystemLoadAverage=878, ProcessCount=23, AvailableMemoryMB=1073 2024-12-11T06:15:06,413 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-12-11T06:15:06,415 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:15:06,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T06:15:06,418 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:15:06,419 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-12-11T06:15:06,420 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:15:06,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-11T06:15:06,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742242_1418 (size=443) 2024-12-11T06:15:06,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742242_1418 (size=443) 2024-12-11T06:15:06,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742242_1418 (size=443) 2024-12-11T06:15:06,505 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => bae42e7466db13f83873c01b77fb7282, NAME => 'testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:15:06,505 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2d77afec07b318acc59e4829d6f19a06, NAME => 'testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:15:06,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742243_1419 (size=68) 2024-12-11T06:15:06,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742243_1419 (size=68) 2024-12-11T06:15:06,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742243_1419 (size=68) 2024-12-11T06:15:06,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742244_1420 (size=68) 2024-12-11T06:15:06,514 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:06,514 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing bae42e7466db13f83873c01b77fb7282, disabling compactions & flushes 2024-12-11T06:15:06,514 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:06,514 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:06,514 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. after waiting 0 ms 2024-12-11T06:15:06,514 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 
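The descriptor logged for testtb-testExportExpiredSnapshot (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', two regions split at '1') maps directly onto the client-side builder API. A minimal sketch, assuming those settings and not the test's actual helper code:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExpiredSnapshotTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // MOB_THRESHOLD => '0' pushes every cell of 'cf' into MOB files, which is
      // what produces the mobdir/... paths seen earlier in this log.
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build());
      // Two regions with boundary '1', matching the STARTKEY/ENDKEY pairs in the log.
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}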
2024-12-11T06:15:06,514 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:06,514 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for bae42e7466db13f83873c01b77fb7282: Waiting for close lock at 1733897706514Disabling compacts and flushes for region at 1733897706514Disabling writes for close at 1733897706514Writing region close event to WAL at 1733897706514Closed at 1733897706514 2024-12-11T06:15:06,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742244_1420 (size=68) 2024-12-11T06:15:06,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742244_1420 (size=68) 2024-12-11T06:15:06,517 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:06,517 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 2d77afec07b318acc59e4829d6f19a06, disabling compactions & flushes 2024-12-11T06:15:06,517 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:06,517 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:06,517 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. after waiting 0 ms 2024-12-11T06:15:06,517 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:06,517 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 
2024-12-11T06:15:06,517 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2d77afec07b318acc59e4829d6f19a06: Waiting for close lock at 1733897706517Disabling compacts and flushes for region at 1733897706517Disabling writes for close at 1733897706517Writing region close event to WAL at 1733897706517Closed at 1733897706517 2024-12-11T06:15:06,519 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:15:06,519 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733897706519"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897706519"}]},"ts":"1733897706519"} 2024-12-11T06:15:06,519 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733897706519"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897706519"}]},"ts":"1733897706519"} 2024-12-11T06:15:06,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-11T06:15:06,527 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T06:15:06,529 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:15:06,529 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897706529"}]},"ts":"1733897706529"} 2024-12-11T06:15:06,531 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-11T06:15:06,531 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:15:06,533 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:15:06,533 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:15:06,533 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:15:06,533 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:15:06,533 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:15:06,533 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:15:06,533 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:15:06,533 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:15:06,533 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:15:06,533 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:15:06,534 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=2d77afec07b318acc59e4829d6f19a06, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae42e7466db13f83873c01b77fb7282, ASSIGN}] 2024-12-11T06:15:06,542 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae42e7466db13f83873c01b77fb7282, ASSIGN 2024-12-11T06:15:06,542 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=2d77afec07b318acc59e4829d6f19a06, ASSIGN 2024-12-11T06:15:06,548 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=2d77afec07b318acc59e4829d6f19a06, ASSIGN; state=OFFLINE, location=b66397bb9319,44539,1733897428597; forceNewPlan=false, retain=false 2024-12-11T06:15:06,548 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae42e7466db13f83873c01b77fb7282, ASSIGN; state=OFFLINE, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:15:06,699 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
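After the balancer picks targets, the two new regions move OFFLINE -> OPENING -> OPEN through TransitRegionStateProcedure and OpenRegionProcedure on the chosen servers, as the next records show. A small sketch, not part of the test, of how a client would wait for that assignment to finish and see where each region landed:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WaitForAssignment {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Block until every region of the table has been opened somewhere.
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);
      }
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        System.out.println(location.getRegion().getEncodedName()
            + " -> " + location.getServerName());
      }
    }
  }
}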
2024-12-11T06:15:06,699 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=2d77afec07b318acc59e4829d6f19a06, regionState=OPENING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:15:06,699 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=bae42e7466db13f83873c01b77fb7282, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:15:06,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae42e7466db13f83873c01b77fb7282, ASSIGN because future has completed 2024-12-11T06:15:06,702 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure bae42e7466db13f83873c01b77fb7282, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:15:06,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=2d77afec07b318acc59e4829d6f19a06, ASSIGN because future has completed 2024-12-11T06:15:06,703 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2d77afec07b318acc59e4829d6f19a06, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:15:06,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-11T06:15:06,857 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:06,858 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => bae42e7466db13f83873c01b77fb7282, NAME => 'testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:15:06,858 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. service=AccessControlService 2024-12-11T06:15:06,858 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:15:06,859 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:06,859 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:06,859 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:06,859 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:06,864 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:06,864 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => 2d77afec07b318acc59e4829d6f19a06, NAME => 'testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:15:06,865 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. service=AccessControlService 2024-12-11T06:15:06,865 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
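Once both regions are open, the test can take the snapshot it later tries to export; the snaptb0-<testName> naming used earlier in this log suggests a name like snaptb0-testExportExpiredSnapshot, though that exact name is an assumption here. A minimal sketch of taking such a snapshot through Admin:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Snapshot of the enabled table; the name follows the pattern seen in this log
      // but is assumed rather than copied from the test source.
      admin.snapshot("snaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"));
    }
  }
}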
2024-12-11T06:15:06,865 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:06,865 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:06,865 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:06,865 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:06,868 INFO [StoreOpener-bae42e7466db13f83873c01b77fb7282-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:06,869 INFO [StoreOpener-2d77afec07b318acc59e4829d6f19a06-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:06,870 INFO [StoreOpener-bae42e7466db13f83873c01b77fb7282-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bae42e7466db13f83873c01b77fb7282 columnFamilyName cf 2024-12-11T06:15:06,871 DEBUG [StoreOpener-bae42e7466db13f83873c01b77fb7282-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:06,872 INFO [StoreOpener-bae42e7466db13f83873c01b77fb7282-1 {}] regionserver.HStore(327): Store=bae42e7466db13f83873c01b77fb7282/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:15:06,872 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:06,873 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:06,873 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:06,874 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:06,874 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:06,874 INFO [StoreOpener-2d77afec07b318acc59e4829d6f19a06-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d77afec07b318acc59e4829d6f19a06 columnFamilyName cf 2024-12-11T06:15:06,875 DEBUG [StoreOpener-2d77afec07b318acc59e4829d6f19a06-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:06,875 INFO [StoreOpener-2d77afec07b318acc59e4829d6f19a06-1 {}] regionserver.HStore(327): Store=2d77afec07b318acc59e4829d6f19a06/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:15:06,875 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:06,876 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:06,876 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:06,877 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:06,877 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:06,877 DEBUG 
[RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:06,878 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:15:06,879 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened bae42e7466db13f83873c01b77fb7282; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64067514, jitterRate=-0.04531964659690857}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:15:06,879 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:06,879 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:06,880 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for bae42e7466db13f83873c01b77fb7282: Running coprocessor pre-open hook at 1733897706859Writing region info on filesystem at 1733897706859Initializing all the Stores at 1733897706860 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897706860Cleaning up temporary data from old regions at 1733897706874 (+14 ms)Running coprocessor post-open hooks at 1733897706879 (+5 ms)Region opened successfully at 1733897706880 (+1 ms) 2024-12-11T06:15:06,881 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282., pid=172, masterSystemTime=1733897706854 2024-12-11T06:15:06,881 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:15:06,882 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened 2d77afec07b318acc59e4829d6f19a06; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61969828, jitterRate=-0.07657760381698608}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:15:06,882 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:06,882 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for 2d77afec07b318acc59e4829d6f19a06: Running coprocessor pre-open hook at 1733897706865Writing region info on filesystem at 1733897706865Initializing all the Stores at 1733897706866 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897706866Cleaning up temporary data from old regions at 1733897706877 (+11 ms)Running coprocessor post-open hooks at 1733897706882 (+5 ms)Region opened successfully at 1733897706882 2024-12-11T06:15:06,883 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06., pid=173, masterSystemTime=1733897706856 2024-12-11T06:15:06,884 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:06,884 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:06,884 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=bae42e7466db13f83873c01b77fb7282, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:15:06,885 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:06,885 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:06,886 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=2d77afec07b318acc59e4829d6f19a06, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:15:06,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure bae42e7466db13f83873c01b77fb7282, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:15:06,892 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=b66397bb9319,44539,1733897428597, table=testtb-testExportExpiredSnapshot, region=2d77afec07b318acc59e4829d6f19a06. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
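
The region-open journal above prints the column family descriptor for 'cf' (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64KB, IS_MOB => 'true', MOB_THRESHOLD => '0') and shows the table coming online as two regions split at '1'. The test creates the table through its own HBaseTestingUtil helpers, which are not visible here; the sketch below is a hedged client-side equivalent using the public Admin API, with connection details assumed.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExpiredSnapshotTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Column family matching the descriptor in the region-open journal:
      // one version, ROW bloom filter, 64KB blocks, MOB enabled, threshold 0.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .setBlocksize(64 * 1024)
          .setMobEnabled(true)
          .setMobThreshold(0)
          .build();
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
          .setColumnFamily(cf)
          .build();
      // Pre-split at '1' so two regions are created, as in the log
      // (STARTKEY ''..'1' and '1'..'').
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
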
2024-12-11T06:15:06,892 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-12-11T06:15:06,892 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure bae42e7466db13f83873c01b77fb7282, server=b66397bb9319,42579,1733897428663 in 187 msec 2024-12-11T06:15:06,894 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae42e7466db13f83873c01b77fb7282, ASSIGN in 359 msec 2024-12-11T06:15:06,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2d77afec07b318acc59e4829d6f19a06, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:15:06,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-12-11T06:15:06,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure 2d77afec07b318acc59e4829d6f19a06, server=b66397bb9319,44539,1733897428597 in 200 msec 2024-12-11T06:15:06,907 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-12-11T06:15:06,907 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=2d77afec07b318acc59e4829d6f19a06, ASSIGN in 371 msec 2024-12-11T06:15:06,908 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:15:06,908 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897706908"}]},"ts":"1733897706908"} 2024-12-11T06:15:06,910 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-11T06:15:06,911 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:15:06,911 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-11T06:15:06,915 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T06:15:06,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:06,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 
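
PermissionStorage(177) above records the master writing "jenkins: RWXCA" for the new table as part of CREATE_TABLE_POST_OPERATION, and the ZooKeeper watchers then see the change under /hbase/acl. An explicit grant issued from a client would look roughly like the sketch below; RWXCA maps to the five Permission.Action values. This illustrates the equivalent public API, not the internal code path the procedure uses, and the connection setup is assumed.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTableOwnerAcl {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection()) {
      // RWXCA in the log corresponds to READ, WRITE, EXEC, CREATE, ADMIN.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          "jenkins",
          null,   // whole table: no column family restriction
          null,   // no qualifier restriction
          Permission.Action.READ, Permission.Action.WRITE,
          Permission.Action.EXEC, Permission.Action.CREATE,
          Permission.Action.ADMIN);
    }
  }
}
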
2024-12-11T06:15:06,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:06,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:06,923 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:06,924 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:06,924 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:06,925 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:06,932 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 508 msec 2024-12-11T06:15:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-11T06:15:07,047 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T06:15:07,047 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:07,051 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-11T06:15:07,051 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 
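
ClientMetaTableAccessor above scans hbase:meta for the new table and HBaseTestingUtil reports "Found 2 regions for table". A plain client can obtain the same region list through a RegionLocator; a minimal sketch, with connection details assumed:

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListTableRegions {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      // Same information the test log reports as "Found 2 regions for table ...".
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " on " + loc.getServerName());
      }
    }
  }
}
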
2024-12-11T06:15:07,051 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:15:07,053 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:07,059 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:07,065 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:07,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T06:15:07,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897707068 (current time:1733897707068). 2024-12-11T06:15:07,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:15:07,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-11T06:15:07,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:15:07,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@288027e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:07,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:07,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:07,078 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:07,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:07,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:07,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7aed0c76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-12-11T06:15:07,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:07,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:07,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:07,080 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45850, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:07,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62db341c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:07,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:07,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:07,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:07,083 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51346, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:07,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
2024-12-11T06:15:07,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:07,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:07,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:07,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d527e92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:07,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:07,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:07,087 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:07,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:07,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:07,088 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:15:07,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5910a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:07,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:07,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:07,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:07,090 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45864, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:07,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@796d8b00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:07,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:07,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:07,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:07,094 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51354, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:07,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:15:07,100 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 
2024-12-11T06:15:07,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:07,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:07,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:07,100 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:15:07,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T06:15:07,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
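
At this point the master has validated the request { ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } and found no existing snapshot with that name. On the client side, a FLUSH snapshot like this one is requested through Admin.snapshot; the sketch below is a minimal, hedged example with connection details assumed.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Blocks until the SnapshotProcedure on the master completes
      // (the "Checking to see if procedure is done" polling in the log).
      admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH);
    }
  }
}
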
2024-12-11T06:15:07,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T06:15:07,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-11T06:15:07,105 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:15:07,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-11T06:15:07,106 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:15:07,111 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:15:07,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742245_1421 (size=170) 2024-12-11T06:15:07,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742245_1421 (size=170) 2024-12-11T06:15:07,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742245_1421 (size=170) 2024-12-11T06:15:07,123 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:15:07,123 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2d77afec07b318acc59e4829d6f19a06}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae42e7466db13f83873c01b77fb7282}] 2024-12-11T06:15:07,124 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:07,124 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:07,217 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-11T06:15:07,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-11T06:15:07,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for 2d77afec07b318acc59e4829d6f19a06: 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for bae42e7466db13f83873c01b77fb7282: 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:07,277 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:15:07,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742246_1422 (size=71) 2024-12-11T06:15:07,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742246_1422 (size=71) 2024-12-11T06:15:07,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742246_1422 (size=71) 2024-12-11T06:15:07,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742247_1423 (size=71) 2024-12-11T06:15:07,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742247_1423 (size=71) 2024-12-11T06:15:07,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742247_1423 (size=71) 2024-12-11T06:15:07,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-11T06:15:07,598 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-11T06:15:07,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:07,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 
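
The repeated "Checking to see if procedure is done pid=174" calls above are the client side polling the master while SnapshotRegionProcedure runs on each region server. The asynchronous public API exposes the same pattern; the following sketch uses snapshotAsync plus isSnapshotFinished as an illustration, which is not necessarily the exact mechanism this test client uses.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class PollSnapshotCompletion {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      SnapshotDescription desc = new SnapshotDescription(
          "emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH);
      admin.snapshotAsync(desc);
      // Poll until the master reports the snapshot procedure as finished,
      // mirroring the repeated "Checking to see if procedure is done" calls.
      while (!admin.isSnapshotFinished(desc)) {
        Thread.sleep(200);
      }
    }
  }
}
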
2024-12-11T06:15:07,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-11T06:15:07,697 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-11T06:15:07,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-11T06:15:07,698 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:07,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-11T06:15:07,698 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:07,698 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:07,698 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:07,700 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2d77afec07b318acc59e4829d6f19a06 in 576 msec 2024-12-11T06:15:07,702 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=174 2024-12-11T06:15:07,702 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bae42e7466db13f83873c01b77fb7282 in 576 msec 2024-12-11T06:15:07,702 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:15:07,703 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:15:07,704 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:15:07,704 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:15:07,704 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:07,704 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T06:15:07,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742248_1424 (size=63) 2024-12-11T06:15:07,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742248_1424 (size=63) 2024-12-11T06:15:07,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742248_1424 (size=63) 2024-12-11T06:15:07,731 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:15:07,731 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-11T06:15:07,732 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-11T06:15:07,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-11T06:15:07,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742249_1425 (size=653) 2024-12-11T06:15:07,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742249_1425 (size=653) 2024-12-11T06:15:07,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742249_1425 (size=653) 2024-12-11T06:15:07,758 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:15:07,764 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:15:07,764 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-11T06:15:07,766 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:15:07,766 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-11T06:15:07,768 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 665 msec 2024-12-11T06:15:08,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-11T06:15:08,055 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-11T06:15:08,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-11T06:15:08,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-11T06:15:08,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-11T06:15:08,246 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T06:15:08,251 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44539 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:15:08,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:15:08,254 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:08,256 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-11T06:15:08,256 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 
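
HRegion(8528) above warns that data is being written "with WAL disabled. Data may be lost in the event of a crash." That warning is what a region server emits for mutations submitted with SKIP_WAL durability. A hedged sketch follows; the row key, qualifier, and value are made up for illustration.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL triggers the region server warning seen in the log:
      // the write is not journaled, so it can be lost on a crash.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
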
2024-12-11T06:15:08,256 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:15:08,258 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:08,262 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:08,266 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:08,268 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T06:15:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897708268 (current time:1733897708268). 2024-12-11T06:15:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:15:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-11T06:15:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:15:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2aad9900, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:08,270 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:08,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:08,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:08,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33be2322, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-11T06:15:08,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:08,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:08,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:08,271 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45868, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:08,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41e4f082, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:08,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:08,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:08,274 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51368, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:08,274 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:15:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:08,275 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:15:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ec590d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:08,276 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:08,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:08,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:08,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a48e4b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:08,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:08,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:08,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:08,278 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45896, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:08,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@459f001, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:08,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:08,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:08,280 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51376, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:08,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:15:08,282 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:15:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:08,283 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:15:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T06:15:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
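The entries above show the master-side validation for the snapshot request: the short-lived connection used by SnapshotDescriptionUtils is closed, the hbase:acl entry for testtb-testExportExpiredSnapshot (jenkins: RWXCA) is read, and SnapshotManager finds no existing snapshot to conflict with. For orientation, a minimal client-side sketch of the call that starts this flow, using the public Admin API; the snapshot and table names are taken from the log, while the connection setup is a plain assumption rather than the test harness's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeFlushSnapshot {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Names copied from the log. The two-argument snapshot() takes a
                // flush-type snapshot of an enabled table, matching type=FLUSH below.
                admin.snapshot("snaptb0-testExportExpiredSnapshot",
                        TableName.valueOf("testtb-testExportExpiredSnapshot"));
            }
        }
    }

On the master, this request is what becomes SnapshotProcedure pid=177 in the entries that follow, stepping through SNAPSHOT_PREPARE, SNAPSHOT_PRE_OPERATION, SNAPSHOT_WRITE_SNAPSHOT_INFO and the later states.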
2024-12-11T06:15:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-11T06:15:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-11T06:15:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-11T06:15:08,285 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:15:08,286 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:15:08,288 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:15:08,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742250_1426 (size=165) 2024-12-11T06:15:08,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742250_1426 (size=165) 2024-12-11T06:15:08,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742250_1426 (size=165) 2024-12-11T06:15:08,294 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:15:08,294 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2d77afec07b318acc59e4829d6f19a06}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae42e7466db13f83873c01b77fb7282}] 2024-12-11T06:15:08,295 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:08,295 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:08,386 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-11T06:15:08,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-11T06:15:08,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44539 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-11T06:15:08,447 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:08,447 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:08,447 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing 2d77afec07b318acc59e4829d6f19a06 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-11T06:15:08,447 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing bae42e7466db13f83873c01b77fb7282 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-11T06:15:08,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211b0d6784878b24f0298a63b78783f50c4_2d77afec07b318acc59e4829d6f19a06 is 71, key is 0984f70c615ef4c9254c38fc42d3deed/cf:q/1733897708251/Put/seqid=0 2024-12-11T06:15:08,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211e835cf0d8a35466cbfcafb72d575bde4_bae42e7466db13f83873c01b77fb7282 is 71, key is 125d731af55a9530fd1af82e0825fdea/cf:q/1733897708253/Put/seqid=0 2024-12-11T06:15:08,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742252_1428 (size=8241) 2024-12-11T06:15:08,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742251_1427 (size=5032) 2024-12-11T06:15:08,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742251_1427 (size=5032) 2024-12-11T06:15:08,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742252_1428 (size=8241) 2024-12-11T06:15:08,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742251_1427 (size=5032) 2024-12-11T06:15:08,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:08,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742252_1428 (size=8241) 2024-12-11T06:15:08,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:08,485 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211b0d6784878b24f0298a63b78783f50c4_2d77afec07b318acc59e4829d6f19a06 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241211b0d6784878b24f0298a63b78783f50c4_2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:08,485 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211e835cf0d8a35466cbfcafb72d575bde4_bae42e7466db13f83873c01b77fb7282 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241211e835cf0d8a35466cbfcafb72d575bde4_bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:08,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/.tmp/cf/7cf748ea94044adca8713ba2d1dd9a23, store: [table=testtb-testExportExpiredSnapshot family=cf region=2d77afec07b318acc59e4829d6f19a06] 2024-12-11T06:15:08,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/.tmp/cf/7cf748ea94044adca8713ba2d1dd9a23 is 209, key is 01952db2e5b66ddb4d50131f7b7e94474/cf:q/1733897708251/Put/seqid=0 2024-12-11T06:15:08,486 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/.tmp/cf/afc3e4a6e646475d95de01b9bd237ed9, store: [table=testtb-testExportExpiredSnapshot family=cf region=bae42e7466db13f83873c01b77fb7282] 2024-12-11T06:15:08,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, 
pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/.tmp/cf/afc3e4a6e646475d95de01b9bd237ed9 is 209, key is 1accab367e9a0475a72787624f63c798d/cf:q/1733897708253/Put/seqid=0 2024-12-11T06:15:08,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742253_1429 (size=5709) 2024-12-11T06:15:08,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742254_1430 (size=15202) 2024-12-11T06:15:08,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742253_1429 (size=5709) 2024-12-11T06:15:08,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742253_1429 (size=5709) 2024-12-11T06:15:08,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742254_1430 (size=15202) 2024-12-11T06:15:08,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742254_1430 (size=15202) 2024-12-11T06:15:08,499 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/.tmp/cf/7cf748ea94044adca8713ba2d1dd9a23 2024-12-11T06:15:08,500 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/.tmp/cf/afc3e4a6e646475d95de01b9bd237ed9 2024-12-11T06:15:08,505 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/.tmp/cf/afc3e4a6e646475d95de01b9bd237ed9 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/cf/afc3e4a6e646475d95de01b9bd237ed9 2024-12-11T06:15:08,506 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/.tmp/cf/7cf748ea94044adca8713ba2d1dd9a23 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/cf/7cf748ea94044adca8713ba2d1dd9a23 2024-12-11T06:15:08,510 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/cf/afc3e4a6e646475d95de01b9bd237ed9, entries=48, sequenceid=6, filesize=14.8 K 2024-12-11T06:15:08,511 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/cf/7cf748ea94044adca8713ba2d1dd9a23, entries=2, sequenceid=6, filesize=5.6 K 2024-12-11T06:15:08,511 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for bae42e7466db13f83873c01b77fb7282 in 64ms, sequenceid=6, compaction requested=false 2024-12-11T06:15:08,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for bae42e7466db13f83873c01b77fb7282: 2024-12-11T06:15:08,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. for snaptb0-testExportExpiredSnapshot completed. 2024-12-11T06:15:08,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T06:15:08,511 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 2d77afec07b318acc59e4829d6f19a06 in 64ms, sequenceid=6, compaction requested=false 2024-12-11T06:15:08,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:08,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/cf/afc3e4a6e646475d95de01b9bd237ed9] hfiles 2024-12-11T06:15:08,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for 2d77afec07b318acc59e4829d6f19a06: 2024-12-11T06:15:08,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/cf/afc3e4a6e646475d95de01b9bd237ed9 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T06:15:08,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. for snaptb0-testExportExpiredSnapshot completed. 2024-12-11T06:15:08,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T06:15:08,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:08,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/cf/7cf748ea94044adca8713ba2d1dd9a23] hfiles 2024-12-11T06:15:08,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/cf/7cf748ea94044adca8713ba2d1dd9a23 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-11T06:15:08,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742256_1432 (size=110) 2024-12-11T06:15:08,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742256_1432 (size=110) 2024-12-11T06:15:08,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742255_1431 (size=110) 2024-12-11T06:15:08,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742255_1431 (size=110) 2024-12-11T06:15:08,527 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:08,528 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-11T06:15:08,528 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 
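In the region-level phase above, each SnapshotRegionProcedure (pid=178 and pid=179) first flushes its region's memstore (132 B and ~3.13 KB respectively), the MOB flusher moves the flushed cells into the mob directory under 3d21f28acea2939462cb18e5d9576f2f, and the snapshot manifest then records references to the resulting hfiles rather than copying any data. As a rough standalone illustration of that flush step (not the code path the procedure itself uses, which flushes directly on the region server), a similar effect is available from a client via Admin.flush:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks every region of the table to persist its memstore to new hfiles,
                // the same precondition the FLUSH snapshot establishes per region.
                admin.flush(TableName.valueOf("testtb-testExportExpiredSnapshot"));
            }
        }
    }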
2024-12-11T06:15:08,528 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-11T06:15:08,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742256_1432 (size=110) 2024-12-11T06:15:08,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742255_1431 (size=110) 2024-12-11T06:15:08,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-11T06:15:08,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-11T06:15:08,528 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:08,528 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:08,528 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:08,529 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:08,531 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bae42e7466db13f83873c01b77fb7282 in 235 msec 2024-12-11T06:15:08,532 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=177 2024-12-11T06:15:08,532 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2d77afec07b318acc59e4829d6f19a06 in 235 msec 2024-12-11T06:15:08,532 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:15:08,533 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:15:08,533 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:15:08,533 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:15:08,534 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:08,535 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241211e835cf0d8a35466cbfcafb72d575bde4_bae42e7466db13f83873c01b77fb7282, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241211b0d6784878b24f0298a63b78783f50c4_2d77afec07b318acc59e4829d6f19a06] hfiles 2024-12-11T06:15:08,535 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241211e835cf0d8a35466cbfcafb72d575bde4_bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:08,535 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241211b0d6784878b24f0298a63b78783f50c4_2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:08,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742257_1433 (size=294) 2024-12-11T06:15:08,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742257_1433 (size=294) 2024-12-11T06:15:08,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742257_1433 (size=294) 2024-12-11T06:15:08,543 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:15:08,543 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-11T06:15:08,543 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-11T06:15:08,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742258_1434 (size=963) 2024-12-11T06:15:08,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742258_1434 (size=963) 2024-12-11T06:15:08,552 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742258_1434 (size=963) 2024-12-11T06:15:08,554 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:15:08,559 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:15:08,559 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-11T06:15:08,560 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:15:08,560 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-11T06:15:08,562 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 277 msec 2024-12-11T06:15:08,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-11T06:15:08,597 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T06:15:08,598 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:15:08,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-11T06:15:08,600 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, 
hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:15:08,600 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-11T06:15:08,601 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:15:08,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T06:15:08,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742259_1435 (size=436) 2024-12-11T06:15:08,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742259_1435 (size=436) 2024-12-11T06:15:08,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742259_1435 (size=436) 2024-12-11T06:15:08,609 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 84d6f5deb75d828a55d48a410e730336, NAME => 'testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:15:08,609 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 122c4bf679e9253b1a206af0572eb5cb, NAME => 'testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:15:08,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742260_1436 (size=61) 2024-12-11T06:15:08,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742260_1436 (size=61) 2024-12-11T06:15:08,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37875 is added to blk_1073742261_1437 (size=61) 2024-12-11T06:15:08,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742261_1437 (size=61) 2024-12-11T06:15:08,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742260_1436 (size=61) 2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:08,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742261_1437 (size=61) 2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 122c4bf679e9253b1a206af0572eb5cb, disabling compactions & flushes 2024-12-11T06:15:08,617 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. after waiting 0 ms 2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:08,617 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 122c4bf679e9253b1a206af0572eb5cb: Waiting for close lock at 1733897708617Disabling compacts and flushes for region at 1733897708617Disabling writes for close at 1733897708617Writing region close event to WAL at 1733897708617Closed at 1733897708617 2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 84d6f5deb75d828a55d48a410e730336, disabling compactions & flushes 2024-12-11T06:15:08,617 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 
2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. after waiting 0 ms 2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 2024-12-11T06:15:08,617 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 2024-12-11T06:15:08,617 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 84d6f5deb75d828a55d48a410e730336: Waiting for close lock at 1733897708617Disabling compacts and flushes for region at 1733897708617Disabling writes for close at 1733897708617Writing region close event to WAL at 1733897708617Closed at 1733897708617 2024-12-11T06:15:08,618 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:15:08,619 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733897708618"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897708618"}]},"ts":"1733897708618"} 2024-12-11T06:15:08,619 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733897708618"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897708618"}]},"ts":"1733897708618"} 2024-12-11T06:15:08,621 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
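The create request logged above (HMaster$4(2454)) carries the full descriptor for testExportExpiredSnapshot: a single column family 'cf' with IS_MOB => 'true' and MOB_THRESHOLD => '0', so every cell is written as a MOB regardless of size, plus VERSIONS => '1' and a ROW bloom filter; the two regions are split at row key '1'. A sketch of building an equivalent descriptor with the public builder API follows; only attributes visible in the log are set explicitly, the remaining ones keep their defaults, and this is an illustration rather than the harness's actual table-creation code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
        public static void main(String[] args) throws Exception {
            ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("cf"))
                    .setMobEnabled(true)       // IS_MOB => 'true'
                    .setMobThreshold(0L)       // MOB_THRESHOLD => '0': every cell goes to mob files
                    .setMaxVersions(1)         // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW)
                    .build();
            TableDescriptor table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
                    .setColumnFamily(cf)
                    .build();
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Pre-split at '1' to get the two regions ('' -> '1', '1' -> '') seen in the log.
                admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
            }
        }
    }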
2024-12-11T06:15:08,622 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:15:08,622 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897708622"}]},"ts":"1733897708622"} 2024-12-11T06:15:08,623 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-11T06:15:08,623 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:15:08,625 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:15:08,625 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:15:08,625 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:15:08,625 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:15:08,625 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:15:08,625 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:15:08,625 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:15:08,625 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:15:08,625 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:15:08,625 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:15:08,625 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=84d6f5deb75d828a55d48a410e730336, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=122c4bf679e9253b1a206af0572eb5cb, ASSIGN}] 2024-12-11T06:15:08,626 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=122c4bf679e9253b1a206af0572eb5cb, ASSIGN 2024-12-11T06:15:08,626 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=84d6f5deb75d828a55d48a410e730336, ASSIGN 2024-12-11T06:15:08,626 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=84d6f5deb75d828a55d48a410e730336, ASSIGN; state=OFFLINE, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:15:08,626 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=122c4bf679e9253b1a206af0572eb5cb, ASSIGN; state=OFFLINE, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:15:08,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T06:15:08,777 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-11T06:15:08,777 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=84d6f5deb75d828a55d48a410e730336, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:15:08,777 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=122c4bf679e9253b1a206af0572eb5cb, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:15:08,779 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=84d6f5deb75d828a55d48a410e730336, ASSIGN because future has completed 2024-12-11T06:15:08,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84d6f5deb75d828a55d48a410e730336, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:15:08,779 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=122c4bf679e9253b1a206af0572eb5cb, ASSIGN because future has completed 2024-12-11T06:15:08,780 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 122c4bf679e9253b1a206af0572eb5cb, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:15:08,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T06:15:08,934 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 2024-12-11T06:15:08,934 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => 84d6f5deb75d828a55d48a410e730336, NAME => 'testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:15:08,934 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. service=AccessControlService 2024-12-11T06:15:08,934 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 
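With the regions added to meta, the TransitRegionStateProcedures above pick target servers (b66397bb9319,42579 and b66397bb9319,35939) and hand off to OpenRegionProcedures pid=183 and pid=184. Once assignment settles, a client can confirm where each region landed through the RegionLocator API; a small sketch, with only the table name taken from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowRegionLocations {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 RegionLocator locator =
                     conn.getRegionLocator(TableName.valueOf("testExportExpiredSnapshot"))) {
                // Prints each region's encoded name and the region server hosting it,
                // the same information the assignment entries above record in hbase:meta.
                for (HRegionLocation loc : locator.getAllRegionLocations()) {
                    System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
                }
            }
        }
    }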
2024-12-11T06:15:08,934 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => 122c4bf679e9253b1a206af0572eb5cb, NAME => 'testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:15:08,934 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:15:08,935 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:08,935 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. service=AccessControlService 2024-12-11T06:15:08,935 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:08,935 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:08,935 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:08,935 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
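Each region open above registers AccessControlService and loads the AccessController system coprocessor, the component whose hbase:acl table backs the permission reads seen earlier in this log. How the test harness wires it up is not visible here; as a hedged sketch, the coprocessor is commonly enabled through the standard configuration keys, shown below set programmatically on an HBase Configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableAccessController {
        public static Configuration secureConf() {
            Configuration conf = HBaseConfiguration.create();
            String ac = "org.apache.hadoop.hbase.security.access.AccessController";
            // Standard keys for loading the ACL coprocessor on the master and region servers.
            conf.set("hbase.coprocessor.master.classes", ac);
            conf.set("hbase.coprocessor.region.classes", ac);
            conf.set("hbase.coprocessor.regionserver.classes", ac);
            conf.setBoolean("hbase.security.authorization", true);
            return conf;
        }
    }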
2024-12-11T06:15:08,935 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:08,935 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:08,935 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:08,935 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:08,936 INFO [StoreOpener-84d6f5deb75d828a55d48a410e730336-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:08,937 INFO [StoreOpener-122c4bf679e9253b1a206af0572eb5cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:08,937 INFO [StoreOpener-84d6f5deb75d828a55d48a410e730336-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 84d6f5deb75d828a55d48a410e730336 columnFamilyName cf 2024-12-11T06:15:08,938 INFO [StoreOpener-122c4bf679e9253b1a206af0572eb5cb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 122c4bf679e9253b1a206af0572eb5cb columnFamilyName cf 2024-12-11T06:15:08,938 DEBUG [StoreOpener-84d6f5deb75d828a55d48a410e730336-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:08,938 DEBUG [StoreOpener-122c4bf679e9253b1a206af0572eb5cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:08,938 INFO [StoreOpener-84d6f5deb75d828a55d48a410e730336-1 {}] regionserver.HStore(327): Store=84d6f5deb75d828a55d48a410e730336/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:15:08,939 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:08,939 INFO [StoreOpener-122c4bf679e9253b1a206af0572eb5cb-1 {}] regionserver.HStore(327): Store=122c4bf679e9253b1a206af0572eb5cb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:15:08,939 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:08,939 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:08,940 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:08,940 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:08,940 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:08,940 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:08,940 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:08,940 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:08,940 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:08,941 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 
{event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:08,942 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:08,943 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:15:08,943 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened 84d6f5deb75d828a55d48a410e730336; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67985806, jitterRate=0.013067454099655151}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:15:08,944 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:15:08,944 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:08,944 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened 122c4bf679e9253b1a206af0572eb5cb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72046790, jitterRate=0.07358083128929138}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:15:08,944 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:08,944 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for 122c4bf679e9253b1a206af0572eb5cb: Running coprocessor pre-open hook at 1733897708935Writing region info on filesystem at 1733897708935Initializing all the Stores at 1733897708936 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897708936Cleaning up temporary data from old regions at 1733897708940 (+4 ms)Running coprocessor post-open hooks at 1733897708944 (+4 ms)Region opened successfully at 1733897708944 2024-12-11T06:15:08,944 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for 84d6f5deb75d828a55d48a410e730336: Running coprocessor pre-open hook at 1733897708935Writing region info on filesystem at 
1733897708935Initializing all the Stores at 1733897708935Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897708935Cleaning up temporary data from old regions at 1733897708940 (+5 ms)Running coprocessor post-open hooks at 1733897708944 (+4 ms)Region opened successfully at 1733897708944 2024-12-11T06:15:08,945 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336., pid=183, masterSystemTime=1733897708931 2024-12-11T06:15:08,945 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb., pid=184, masterSystemTime=1733897708932 2024-12-11T06:15:08,946 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:08,946 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:08,947 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=122c4bf679e9253b1a206af0572eb5cb, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:15:08,947 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 2024-12-11T06:15:08,947 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 
2024-12-11T06:15:08,948 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=84d6f5deb75d828a55d48a410e730336, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:15:08,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 122c4bf679e9253b1a206af0572eb5cb, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:15:08,949 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84d6f5deb75d828a55d48a410e730336, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:15:08,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=182 2024-12-11T06:15:08,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure 122c4bf679e9253b1a206af0572eb5cb, server=b66397bb9319,35939,1733897428441 in 169 msec 2024-12-11T06:15:08,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=181 2024-12-11T06:15:08,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure 84d6f5deb75d828a55d48a410e730336, server=b66397bb9319,42579,1733897428663 in 171 msec 2024-12-11T06:15:08,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=122c4bf679e9253b1a206af0572eb5cb, ASSIGN in 326 msec 2024-12-11T06:15:08,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-12-11T06:15:08,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=84d6f5deb75d828a55d48a410e730336, ASSIGN in 327 msec 2024-12-11T06:15:08,954 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:15:08,954 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897708954"}]},"ts":"1733897708954"} 2024-12-11T06:15:08,956 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-11T06:15:08,956 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:15:08,957 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-11T06:15:08,959 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T06:15:08,961 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:08,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:08,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:08,961 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:08,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:08,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:08,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:08,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:08,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:08,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:08,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:08,970 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:08,971 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 371 msec 2024-12-11T06:15:09,227 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-11T06:15:09,227 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-11T06:15:09,227 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:09,229 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-11T06:15:09,229 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 2024-12-11T06:15:09,230 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:15:09,231 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:09,235 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:09,239 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:09,246 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:15:09,247 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:15:09,248 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:09,250 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-11T06:15:09,250 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 
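The entries above show the CreateTableProcedure for testExportExpiredSnapshot finishing and its two pre-split regions (start keys '' and '1') coming online with a MOB-enabled 'cf' family. As a rough illustration only, and not the test's actual code, a client could request an equivalent table through the HBase Admin API roughly as follows; the connection boilerplate and builder calls are assumed standard HBase 2.x+ client usage.

// Hedged sketch (assumed standard HBase 2.x+ client API, not the test's code):
// create a table with a MOB-enabled column family "cf", pre-split at row key
// "1", which would yield the two regions (STARTKEY '' and '1') opened above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testExportExpiredSnapshot");
      admin.createTable(
          TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)   // matches IS_MOB => 'true' in the log
                  .setMobThreshold(0)    // matches MOB_THRESHOLD => '0'
                  .setMaxVersions(1)     // matches VERSIONS => '1'
                  .build())
              .build(),
          new byte[][] { Bytes.toBytes("1") }); // one split point => two regions
    }
  }
}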
2024-12-11T06:15:09,250 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:15:09,251 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:09,255 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-11T06:15:09,260 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-11T06:15:09,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-11T06:15:09,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:15:09,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fd32e87, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:09,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:09,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:09,261 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:09,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:09,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:09,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38841452, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:09,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:09,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:09,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:09,263 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45920, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:09,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f30ed1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:09,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:09,264 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:09,264 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:09,265 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51386, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:09,266 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 2024-12-11T06:15:09,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:09,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:09,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:09,266 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:15:09,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7553d68d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:09,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:09,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:09,267 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:09,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:09,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:09,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c41676b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:09,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:09,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:09,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:09,269 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45928, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:09,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48c3176f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:09,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:09,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:09,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:09,271 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51392, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T06:15:09,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:15:09,273 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 2024-12-11T06:15:09,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:09,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:09,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:09,274 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:15:09,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T06:15:09,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T06:15:09,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-11T06:15:09,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-11T06:15:09,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-11T06:15:09,276 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:15:09,277 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:15:09,279 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:15:09,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742262_1438 (size=152) 2024-12-11T06:15:09,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742262_1438 (size=152) 2024-12-11T06:15:09,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742262_1438 (size=152) 2024-12-11T06:15:09,285 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:15:09,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84d6f5deb75d828a55d48a410e730336}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 122c4bf679e9253b1a206af0572eb5cb}] 2024-12-11T06:15:09,286 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:09,286 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:09,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-11T06:15:09,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-11T06:15:09,438 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-11T06:15:09,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:09,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 2024-12-11T06:15:09,438 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing 84d6f5deb75d828a55d48a410e730336 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-11T06:15:09,438 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing 122c4bf679e9253b1a206af0572eb5cb 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-11T06:15:09,457 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412114b44192512d24f46a48435c47e716681_122c4bf679e9253b1a206af0572eb5cb is 71, key is 1106ad36120199c18e9ad8eac887b7ab/cf:q/1733897709247/Put/seqid=0 2024-12-11T06:15:09,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211f6d00aa129044368bc4d6dabcccb290b_84d6f5deb75d828a55d48a410e730336 is 71, key is 084e3fb035881e04c1e2765affa0c125/cf:q/1733897709246/Put/seqid=0 2024-12-11T06:15:09,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742263_1439 (size=8101) 2024-12-11T06:15:09,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742263_1439 (size=8101) 2024-12-11T06:15:09,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742263_1439 (size=8101) 2024-12-11T06:15:09,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:09,470 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412114b44192512d24f46a48435c47e716681_122c4bf679e9253b1a206af0572eb5cb to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b202412114b44192512d24f46a48435c47e716681_122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:09,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/.tmp/cf/619f43f68487482c9afcab08155d889c, store: [table=testExportExpiredSnapshot family=cf region=122c4bf679e9253b1a206af0572eb5cb] 2024-12-11T06:15:09,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/.tmp/cf/619f43f68487482c9afcab08155d889c is 202, key is 1343d2ba4384d20cda05d97eea9707c86/cf:q/1733897709247/Put/seqid=0 2024-12-11T06:15:09,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742264_1440 (size=5171) 2024-12-11T06:15:09,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742264_1440 (size=5171) 2024-12-11T06:15:09,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742265_1441 (size=14463) 2024-12-11T06:15:09,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742265_1441 (size=14463) 2024-12-11T06:15:09,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742265_1441 (size=14463) 2024-12-11T06:15:09,481 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/.tmp/cf/619f43f68487482c9afcab08155d889c 2024-12-11T06:15:09,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742264_1440 (size=5171) 2024-12-11T06:15:09,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T06:15:09,486 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211f6d00aa129044368bc4d6dabcccb290b_84d6f5deb75d828a55d48a410e730336 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241211f6d00aa129044368bc4d6dabcccb290b_84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:09,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336/.tmp/cf/4252d2948df64af6bbe38e7ce7efc8de, store: [table=testExportExpiredSnapshot family=cf region=84d6f5deb75d828a55d48a410e730336] 2024-12-11T06:15:09,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/.tmp/cf/619f43f68487482c9afcab08155d889c as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/cf/619f43f68487482c9afcab08155d889c 2024-12-11T06:15:09,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336/.tmp/cf/4252d2948df64af6bbe38e7ce7efc8de is 202, key is 0a208b5a8340405d5a65c48b2dd1a6879/cf:q/1733897709246/Put/seqid=0 2024-12-11T06:15:09,493 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/cf/619f43f68487482c9afcab08155d889c, entries=46, sequenceid=5, filesize=14.1 K 2024-12-11T06:15:09,494 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 122c4bf679e9253b1a206af0572eb5cb in 56ms, sequenceid=5, compaction requested=false 2024-12-11T06:15:09,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-11T06:15:09,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for 122c4bf679e9253b1a206af0572eb5cb: 2024-12-11T06:15:09,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. for snapshot-testExportExpiredSnapshot completed. 2024-12-11T06:15:09,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T06:15:09,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:09,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/cf/619f43f68487482c9afcab08155d889c] hfiles 2024-12-11T06:15:09,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/cf/619f43f68487482c9afcab08155d889c for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T06:15:09,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742266_1442 (size=6086) 2024-12-11T06:15:09,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742266_1442 (size=6086) 2024-12-11T06:15:09,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742266_1442 (size=6086) 2024-12-11T06:15:09,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336/.tmp/cf/4252d2948df64af6bbe38e7ce7efc8de 2024-12-11T06:15:09,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336/.tmp/cf/4252d2948df64af6bbe38e7ce7efc8de as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336/cf/4252d2948df64af6bbe38e7ce7efc8de 2024-12-11T06:15:09,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742267_1443 (size=103) 2024-12-11T06:15:09,517 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336/cf/4252d2948df64af6bbe38e7ce7efc8de, entries=4, sequenceid=5, filesize=5.9 
K 2024-12-11T06:15:09,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742267_1443 (size=103) 2024-12-11T06:15:09,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742267_1443 (size=103) 2024-12-11T06:15:09,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:09,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-11T06:15:09,518 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 84d6f5deb75d828a55d48a410e730336 in 80ms, sequenceid=5, compaction requested=false 2024-12-11T06:15:09,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for 84d6f5deb75d828a55d48a410e730336: 2024-12-11T06:15:09,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. for snapshot-testExportExpiredSnapshot completed. 2024-12-11T06:15:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-11T06:15:09,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T06:15:09,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:09,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:09,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336/cf/4252d2948df64af6bbe38e7ce7efc8de] hfiles 2024-12-11T06:15:09,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336/cf/4252d2948df64af6bbe38e7ce7efc8de for snapshot=snapshot-testExportExpiredSnapshot 2024-12-11T06:15:09,519 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:09,522 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 122c4bf679e9253b1a206af0572eb5cb in 235 msec 2024-12-11T06:15:09,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742268_1444 (size=103) 2024-12-11T06:15:09,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742268_1444 (size=103) 2024-12-11T06:15:09,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742268_1444 (size=103) 2024-12-11T06:15:09,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 
2024-12-11T06:15:09,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-11T06:15:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-11T06:15:09,527 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:09,527 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:09,529 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-12-11T06:15:09,530 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 84d6f5deb75d828a55d48a410e730336 in 243 msec 2024-12-11T06:15:09,530 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:15:09,530 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:15:09,531 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:15:09,531 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:15:09,531 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:09,533 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b202412114b44192512d24f46a48435c47e716681_122c4bf679e9253b1a206af0572eb5cb, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241211f6d00aa129044368bc4d6dabcccb290b_84d6f5deb75d828a55d48a410e730336] hfiles 2024-12-11T06:15:09,533 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b202412114b44192512d24f46a48435c47e716681_122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:09,533 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241211f6d00aa129044368bc4d6dabcccb290b_84d6f5deb75d828a55d48a410e730336 2024-12-11T06:15:09,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742269_1445 (size=287) 2024-12-11T06:15:09,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742269_1445 (size=287) 2024-12-11T06:15:09,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742269_1445 (size=287) 2024-12-11T06:15:09,540 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:15:09,540 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-11T06:15:09,540 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-11T06:15:09,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742270_1446 (size=935) 2024-12-11T06:15:09,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742270_1446 (size=935) 2024-12-11T06:15:09,549 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742270_1446 (size=935) 2024-12-11T06:15:09,552 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:15:09,557 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:15:09,557 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-11T06:15:09,558 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:15:09,558 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-11T06:15:09,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 284 msec 2024-12-11T06:15:09,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-11T06:15:09,597 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-11T06:15:10,272 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0007_000001 (auth:SIMPLE) from 127.0.0.1:46432 2024-12-11T06:15:10,292 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0007/container_1733897436125_0007_01_000001/launch_container.sh] 2024-12-11T06:15:10,292 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0007/container_1733897436125_0007_01_000001/container_tokens] 2024-12-11T06:15:10,292 WARN 
[ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733897436125_0007/container_1733897436125_0007_01_000001/sysfs] 2024-12-11T06:15:11,116 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:15:18,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-11T06:15:18,055 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-11T06:15:19,605 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897719605 2024-12-11T06:15:19,605 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:42173, tgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897719605, rawTgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897719605, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:15:19,633 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:15:19,633 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897719605, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897719605/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-11T06:15:19,636 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T06:15:19,637 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] 
at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
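The ERROR above is the expected outcome of this test: the snapshot finished at 06:15:09 with ttl=10 seconds, the export was attempted at 06:15:19, so ExportSnapshot's verification step rejects it with SnapshotTTLExpiredException before copying anything. The sketch below drives the tool the same way the test does (through ToolRunner); the destination path is a stand-in, and the isExpired helper only paraphrases the TTL test rather than quoting the tool's code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {

      // Paraphrase of the expiry test: a snapshot is expired once its creation time
      // plus ttl (seconds) lies in the past; a non-positive ttl means "never expires".
      static boolean isExpired(long creationTimeMillis, long ttlSeconds, long nowMillis) {
        return ttlSeconds > 0 && creationTimeMillis + ttlSeconds * 1000L < nowMillis;
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent of what the test ran; with ttl=10 and ~10s already elapsed this
        // returns a non-zero exit code and logs SnapshotTTLExpiredException.
        int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportExpiredSnapshot",
            "-copy-to", "hdfs://namenode:8020/backups/export-test"  // stand-in destination
        });
        System.out.println("ExportSnapshot exit code: " + exitCode);
      }
    }

The same arguments given to the `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot` command line behave identically.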
2024-12-11T06:15:19,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-11T06:15:19,641 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897719641"}]},"ts":"1733897719641"} 2024-12-11T06:15:19,643 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-11T06:15:19,643 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-11T06:15:19,644 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-11T06:15:19,645 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=2d77afec07b318acc59e4829d6f19a06, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae42e7466db13f83873c01b77fb7282, UNASSIGN}] 2024-12-11T06:15:19,646 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae42e7466db13f83873c01b77fb7282, UNASSIGN 2024-12-11T06:15:19,646 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=2d77afec07b318acc59e4829d6f19a06, UNASSIGN 2024-12-11T06:15:19,646 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=bae42e7466db13f83873c01b77fb7282, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:15:19,647 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=2d77afec07b318acc59e4829d6f19a06, regionState=CLOSING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:15:19,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae42e7466db13f83873c01b77fb7282, UNASSIGN because future has completed 2024-12-11T06:15:19,648 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:15:19,648 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure bae42e7466db13f83873c01b77fb7282, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:15:19,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=2d77afec07b318acc59e4829d6f19a06, UNASSIGN because future has completed 2024-12-11T06:15:19,649 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:15:19,649 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2d77afec07b318acc59e4829d6f19a06, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:15:19,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-11T06:15:19,800 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:19,800 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:19,801 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:15:19,801 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:15:19,801 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing bae42e7466db13f83873c01b77fb7282, disabling compactions & flushes 2024-12-11T06:15:19,801 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:19,801 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing 2d77afec07b318acc59e4829d6f19a06, disabling compactions & flushes 2024-12-11T06:15:19,801 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:19,801 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:19,801 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 
after waiting 0 ms 2024-12-11T06:15:19,801 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:19,801 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:19,801 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. after waiting 0 ms 2024-12-11T06:15:19,801 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 2024-12-11T06:15:19,805 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:15:19,805 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:15:19,806 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:15:19,806 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:15:19,806 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282. 2024-12-11T06:15:19,806 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for bae42e7466db13f83873c01b77fb7282: Waiting for close lock at 1733897719801Running coprocessor pre-close hooks at 1733897719801Disabling compacts and flushes for region at 1733897719801Disabling writes for close at 1733897719801Writing region close event to WAL at 1733897719801Running coprocessor post-close hooks at 1733897719806 (+5 ms)Closed at 1733897719806 2024-12-11T06:15:19,806 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06. 
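The close handlers above are the server side of the disable issued at 06:15:19,638, while the repeated "Checking to see if procedure is done pid=188" lines are the client polling the master for completion. From the blocking Admin API the same operation looks like this sketch (connection setup and class name are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws IOException {
        TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          // Blocking disable: returns once the DisableTableProcedure has closed every
          // region and flipped the table state to DISABLED in hbase:meta.
          if (!admin.isTableDisabled(table)) {
            admin.disableTable(table);
          }
          System.out.println("disabled: " + admin.isTableDisabled(table));
        }
      }
    }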
2024-12-11T06:15:19,806 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for 2d77afec07b318acc59e4829d6f19a06: Waiting for close lock at 1733897719801Running coprocessor pre-close hooks at 1733897719801Disabling compacts and flushes for region at 1733897719801Disabling writes for close at 1733897719801Writing region close event to WAL at 1733897719802 (+1 ms)Running coprocessor post-close hooks at 1733897719806 (+4 ms)Closed at 1733897719806 2024-12-11T06:15:19,808 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:19,808 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=bae42e7466db13f83873c01b77fb7282, regionState=CLOSED 2024-12-11T06:15:19,809 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed 2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:19,809 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=2d77afec07b318acc59e4829d6f19a06, regionState=CLOSED 2024-12-11T06:15:19,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure bae42e7466db13f83873c01b77fb7282, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:15:19,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2d77afec07b318acc59e4829d6f19a06, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:15:19,812 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=191 2024-12-11T06:15:19,812 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure bae42e7466db13f83873c01b77fb7282, server=b66397bb9319,42579,1733897428663 in 163 msec 2024-12-11T06:15:19,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=bae42e7466db13f83873c01b77fb7282, UNASSIGN in 167 msec 2024-12-11T06:15:19,815 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=190 2024-12-11T06:15:19,815 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure 2d77afec07b318acc59e4829d6f19a06, server=b66397bb9319,44539,1733897428597 in 164 msec 2024-12-11T06:15:19,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=189 2024-12-11T06:15:19,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=2d77afec07b318acc59e4829d6f19a06, UNASSIGN in 170 msec 2024-12-11T06:15:19,818 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-11T06:15:19,818 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 173 msec 2024-12-11T06:15:19,820 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897719819"}]},"ts":"1733897719819"} 2024-12-11T06:15:19,821 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-11T06:15:19,821 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-11T06:15:19,823 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 184 msec 2024-12-11T06:15:19,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-11T06:15:19,956 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T06:15:19,957 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,958 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,959 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,961 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,962 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:19,962 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:19,964 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/cf, FileablePath, 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/recovered.edits] 2024-12-11T06:15:19,964 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/recovered.edits] 2024-12-11T06:15:19,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,968 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T06:15:19,968 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T06:15:19,968 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T06:15:19,968 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-11T06:15:19,968 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/cf/7cf748ea94044adca8713ba2d1dd9a23 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/cf/7cf748ea94044adca8713ba2d1dd9a23 2024-12-11T06:15:19,968 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/cf/afc3e4a6e646475d95de01b9bd237ed9 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/cf/afc3e4a6e646475d95de01b9bd237ed9 2024-12-11T06:15:19,969 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:19,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:19,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:19,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:19,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-11T06:15:19,971 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:19,971 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:19,971 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:19,971 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:19,972 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282/recovered.edits/9.seqid 2024-12-11T06:15:19,972 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:19,972 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06/recovered.edits/9.seqid 2024-12-11T06:15:19,973 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportExpiredSnapshot/2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:19,973 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-11T06:15:19,973 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-11T06:15:19,974 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-12-11T06:15:19,977 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241211b0d6784878b24f0298a63b78783f50c4_2d77afec07b318acc59e4829d6f19a06 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241211b0d6784878b24f0298a63b78783f50c4_2d77afec07b318acc59e4829d6f19a06 2024-12-11T06:15:19,977 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241211e835cf0d8a35466cbfcafb72d575bde4_bae42e7466db13f83873c01b77fb7282 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241211e835cf0d8a35466cbfcafb72d575bde4_bae42e7466db13f83873c01b77fb7282 2024-12-11T06:15:19,977 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-11T06:15:19,979 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,981 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-11T06:15:19,983 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-11T06:15:19,984 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,984 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-11T06:15:19,984 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897719984"}]},"ts":"9223372036854775807"} 2024-12-11T06:15:19,984 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897719984"}]},"ts":"9223372036854775807"} 2024-12-11T06:15:19,986 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T06:15:19,986 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 2d77afec07b318acc59e4829d6f19a06, NAME => 'testtb-testExportExpiredSnapshot,,1733897706414.2d77afec07b318acc59e4829d6f19a06.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => bae42e7466db13f83873c01b77fb7282, NAME => 'testtb-testExportExpiredSnapshot,1,1733897706414.bae42e7466db13f83873c01b77fb7282.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T06:15:19,986 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
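With both regions archived and the table descriptor removed, the delete is about to complete, and the entries that follow show the test dropping its snapshots as well. A client-side sketch of that cleanup, using only standard Admin calls (connection setup is assumed), looks like:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws IOException {
        TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          // The table must already be disabled (see the DisableTableProcedure above);
          // deleteTable archives the region directories, which is what the
          // HFileArchiver entries in the log record.
          admin.deleteTable(table);

          // Drop the snapshots created by the test, matching the delete requests logged next.
          admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
          admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
          admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
        }
      }
    }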
2024-12-11T06:15:19,986 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897719986"}]},"ts":"9223372036854775807"} 2024-12-11T06:15:19,987 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-11T06:15:19,988 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-11T06:15:19,989 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 31 msec 2024-12-11T06:15:20,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-11T06:15:20,077 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-12-11T06:15:20,077 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-11T06:15:20,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-11T06:15:20,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-11T06:15:20,087 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-12-11T06:15:20,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-11T06:15:20,089 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-11T06:15:20,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-11T06:15:20,116 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=806 (was 813), OpenFileDescriptor=773 (was 792), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=720 (was 878), ProcessCount=11 (was 23), AvailableMemoryMB=2127 (was 1073) - AvailableMemoryMB LEAK? 
- 2024-12-11T06:15:20,116 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-11T06:15:20,132 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=806, OpenFileDescriptor=773, MaxFileDescriptor=1048576, SystemLoadAverage=720, ProcessCount=11, AvailableMemoryMB=2126 2024-12-11T06:15:20,132 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-12-11T06:15:20,133 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:15:20,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T06:15:20,135 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:15:20,135 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-11T06:15:20,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T06:15:20,136 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:15:20,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742271_1447 (size=448) 2024-12-11T06:15:20,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742271_1447 (size=448) 2024-12-11T06:15:20,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742271_1447 (size=448) 2024-12-11T06:15:20,144 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cd0db02aed95bd6781cec37ad5803637, NAME => 'testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:15:20,144 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e787fa3b025c3182185cfac52b647d88, NAME => 'testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:15:20,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742272_1448 (size=73) 2024-12-11T06:15:20,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742272_1448 (size=73) 2024-12-11T06:15:20,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742272_1448 (size=73) 2024-12-11T06:15:20,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742273_1449 (size=73) 2024-12-11T06:15:20,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742273_1449 (size=73) 2024-12-11T06:15:20,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742273_1449 (size=73) 2024-12-11T06:15:20,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T06:15:20,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T06:15:20,551 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:20,552 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing e787fa3b025c3182185cfac52b647d88, disabling compactions & flushes 2024-12-11T06:15:20,552 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 
2024-12-11T06:15:20,552 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:20,552 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. after waiting 0 ms 2024-12-11T06:15:20,552 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:20,552 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:20,552 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for e787fa3b025c3182185cfac52b647d88: Waiting for close lock at 1733897720552Disabling compacts and flushes for region at 1733897720552Disabling writes for close at 1733897720552Writing region close event to WAL at 1733897720552Closed at 1733897720552 2024-12-11T06:15:20,552 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:20,552 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing cd0db02aed95bd6781cec37ad5803637, disabling compactions & flushes 2024-12-11T06:15:20,552 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 2024-12-11T06:15:20,552 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 2024-12-11T06:15:20,552 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. after waiting 0 ms 2024-12-11T06:15:20,552 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 2024-12-11T06:15:20,552 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 
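The create request above uses a single column family with IS_MOB => 'true', MOB_THRESHOLD => '0' and VERSIONS => '1', split at row key '1' into two regions. A sketch of building that table with the client descriptor builders (class name and connection setup are illustrative) could look like:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws IOException {
        TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");

        // 'cf' with MOB enabled and a threshold of 0, so every cell value is written
        // as a MOB file, and a single version, matching the descriptor logged above.
        TableDescriptor descriptor = TableDescriptorBuilder.newBuilder(table)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)
                .setMobThreshold(0L)
                .setMaxVersions(1)
                .build())
            .build();

        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          // A single split point "1" yields the two regions ('' -> '1' and '1' -> '') seen in the log.
          admin.createTable(descriptor, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }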
2024-12-11T06:15:20,552 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for cd0db02aed95bd6781cec37ad5803637: Waiting for close lock at 1733897720552Disabling compacts and flushes for region at 1733897720552Disabling writes for close at 1733897720552Writing region close event to WAL at 1733897720552Closed at 1733897720552 2024-12-11T06:15:20,553 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:15:20,554 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733897720554"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897720554"}]},"ts":"1733897720554"} 2024-12-11T06:15:20,554 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733897720554"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897720554"}]},"ts":"1733897720554"} 2024-12-11T06:15:20,556 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T06:15:20,557 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:15:20,557 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897720557"}]},"ts":"1733897720557"} 2024-12-11T06:15:20,558 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-11T06:15:20,558 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:15:20,559 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:15:20,559 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:15:20,559 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:15:20,559 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:15:20,559 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:15:20,559 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:15:20,559 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:15:20,559 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:15:20,559 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:15:20,559 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:15:20,560 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=cd0db02aed95bd6781cec37ad5803637, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e787fa3b025c3182185cfac52b647d88, ASSIGN}] 2024-12-11T06:15:20,560 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=cd0db02aed95bd6781cec37ad5803637, ASSIGN 2024-12-11T06:15:20,560 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e787fa3b025c3182185cfac52b647d88, ASSIGN 2024-12-11T06:15:20,561 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e787fa3b025c3182185cfac52b647d88, ASSIGN; state=OFFLINE, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:15:20,561 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=cd0db02aed95bd6781cec37ad5803637, ASSIGN; state=OFFLINE, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:15:20,711 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T06:15:20,712 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=e787fa3b025c3182185cfac52b647d88, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:15:20,712 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=cd0db02aed95bd6781cec37ad5803637, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:15:20,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e787fa3b025c3182185cfac52b647d88, ASSIGN because future has completed 2024-12-11T06:15:20,714 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure e787fa3b025c3182185cfac52b647d88, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:15:20,714 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=cd0db02aed95bd6781cec37ad5803637, ASSIGN because future has completed 2024-12-11T06:15:20,714 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd0db02aed95bd6781cec37ad5803637, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:15:20,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T06:15:20,869 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 2024-12-11T06:15:20,869 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:20,869 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => cd0db02aed95bd6781cec37ad5803637, NAME => 'testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:15:20,869 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => e787fa3b025c3182185cfac52b647d88, NAME => 'testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:15:20,869 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 
service=AccessControlService 2024-12-11T06:15:20,869 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. service=AccessControlService 2024-12-11T06:15:20,869 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:15:20,869 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:15:20,870 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:20,870 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:20,870 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:20,870 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:20,870 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:20,870 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:20,870 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:20,870 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:20,876 INFO [StoreOpener-e787fa3b025c3182185cfac52b647d88-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:20,876 INFO [StoreOpener-cd0db02aed95bd6781cec37ad5803637-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:20,877 INFO [StoreOpener-cd0db02aed95bd6781cec37ad5803637-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd0db02aed95bd6781cec37ad5803637 columnFamilyName cf 2024-12-11T06:15:20,877 INFO [StoreOpener-e787fa3b025c3182185cfac52b647d88-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e787fa3b025c3182185cfac52b647d88 columnFamilyName cf 2024-12-11T06:15:20,878 DEBUG [StoreOpener-cd0db02aed95bd6781cec37ad5803637-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:20,878 DEBUG [StoreOpener-e787fa3b025c3182185cfac52b647d88-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:20,878 INFO [StoreOpener-cd0db02aed95bd6781cec37ad5803637-1 {}] regionserver.HStore(327): Store=cd0db02aed95bd6781cec37ad5803637/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:15:20,878 INFO [StoreOpener-e787fa3b025c3182185cfac52b647d88-1 {}] regionserver.HStore(327): Store=e787fa3b025c3182185cfac52b647d88/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:15:20,879 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:20,879 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:20,879 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:20,879 DEBUG 
[RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:20,880 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:20,880 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:20,880 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:20,880 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:20,880 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:20,880 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:20,881 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:20,882 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:20,883 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:15:20,883 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened cd0db02aed95bd6781cec37ad5803637; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75150521, jitterRate=0.11983002722263336}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:15:20,883 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:20,884 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:15:20,884 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for cd0db02aed95bd6781cec37ad5803637: Running coprocessor pre-open hook at 1733897720870Writing region info on filesystem at 1733897720870Initializing all the Stores at 1733897720871 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897720871Cleaning up temporary data from old regions at 1733897720880 (+9 ms)Running coprocessor post-open hooks at 1733897720883 (+3 ms)Region opened successfully at 1733897720884 (+1 ms) 2024-12-11T06:15:20,884 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened e787fa3b025c3182185cfac52b647d88; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60904033, jitterRate=-0.09245918691158295}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:15:20,884 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:20,884 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for e787fa3b025c3182185cfac52b647d88: Running coprocessor pre-open hook at 1733897720870Writing region info on filesystem at 1733897720870Initializing all the Stores at 1733897720871 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897720871Cleaning up temporary data from old regions at 1733897720880 (+9 ms)Running coprocessor post-open hooks at 1733897720884 (+4 ms)Region opened successfully at 1733897720884 2024-12-11T06:15:20,885 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637., pid=199, masterSystemTime=1733897720866 2024-12-11T06:15:20,885 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88., pid=198, masterSystemTime=1733897720865 2024-12-11T06:15:20,886 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for 
testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 2024-12-11T06:15:20,886 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 2024-12-11T06:15:20,887 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=cd0db02aed95bd6781cec37ad5803637, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:15:20,887 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:20,887 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:20,887 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=e787fa3b025c3182185cfac52b647d88, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:15:20,888 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd0db02aed95bd6781cec37ad5803637, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:15:20,889 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure e787fa3b025c3182185cfac52b647d88, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:15:20,891 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=196 2024-12-11T06:15:20,891 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure cd0db02aed95bd6781cec37ad5803637, server=b66397bb9319,42579,1733897428663 in 175 msec 2024-12-11T06:15:20,892 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=197 2024-12-11T06:15:20,892 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure e787fa3b025c3182185cfac52b647d88, server=b66397bb9319,35939,1733897428441 in 176 msec 2024-12-11T06:15:20,892 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=cd0db02aed95bd6781cec37ad5803637, ASSIGN in 331 msec 2024-12-11T06:15:20,893 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=195 2024-12-11T06:15:20,893 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e787fa3b025c3182185cfac52b647d88, ASSIGN in 332 msec 2024-12-11T06:15:20,894 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; 
CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:15:20,894 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897720894"}]},"ts":"1733897720894"} 2024-12-11T06:15:20,895 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-11T06:15:20,895 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:15:20,896 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-11T06:15:20,898 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T06:15:20,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:20,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:20,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:20,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:20,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:20,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:20,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:20,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:20,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:20,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:20,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:20,902 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:20,903 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 768 msec 2024-12-11T06:15:21,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-11T06:15:21,277 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T06:15:21,277 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:15:21,279 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-11T06:15:21,279 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 2024-12-11T06:15:21,279 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:15:21,281 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:15:21,284 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:15:21,289 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-11T06:15:21,290 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T06:15:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897721290 (current time:1733897721290). 
2024-12-11T06:15:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:15:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-11T06:15:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:15:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ac71dd3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:21,292 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:21,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:21,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:21,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b4b63, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:21,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:21,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:21,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:21,294 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46698, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:21,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c85eb72, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:21,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:21,295 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:21,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:21,296 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55678, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:21,297 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 2024-12-11T06:15:21,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:21,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:21,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:21,297 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:15:21,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25e3708a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:21,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:21,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:21,298 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:21,298 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:21,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:21,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154d5a0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:21,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:21,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:21,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:21,299 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:21,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79eca52e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:21,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:21,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:21,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:21,302 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55688, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T06:15:21,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2] 2024-12-11T06:15:21,304 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063. 2024-12-11T06:15:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:21,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:21,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:21,305 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:15:21,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T06:15:21,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T06:15:21,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-11T06:15:21,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-11T06:15:21,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-11T06:15:21,307 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:15:21,308 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:15:21,310 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:15:21,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742274_1450 (size=185) 2024-12-11T06:15:21,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742274_1450 (size=185) 2024-12-11T06:15:21,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742274_1450 (size=185) 2024-12-11T06:15:21,319 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:15:21,319 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd0db02aed95bd6781cec37ad5803637}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e787fa3b025c3182185cfac52b647d88}] 
2024-12-11T06:15:21,320 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:21,320 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:21,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-11T06:15:21,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-11T06:15:21,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-11T06:15:21,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:21,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 2024-12-11T06:15:21,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for e787fa3b025c3182185cfac52b647d88: 2024-12-11T06:15:21,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for cd0db02aed95bd6781cec37ad5803637: 2024-12-11T06:15:21,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-11T06:15:21,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-11T06:15:21,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T06:15:21,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:21,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:15:21,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T06:15:21,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:21,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:15:21,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742276_1452 (size=76) 2024-12-11T06:15:21,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742275_1451 (size=76) 2024-12-11T06:15:21,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742275_1451 (size=76) 2024-12-11T06:15:21,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742275_1451 (size=76) 2024-12-11T06:15:21,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742276_1452 (size=76) 2024-12-11T06:15:21,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742276_1452 (size=76) 2024-12-11T06:15:21,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:21,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 
2024-12-11T06:15:21,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-11T06:15:21,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-11T06:15:21,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-11T06:15:21,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-11T06:15:21,484 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:21,484 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:21,484 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:21,484 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:21,486 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e787fa3b025c3182185cfac52b647d88 in 166 msec 2024-12-11T06:15:21,487 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=201, resume processing ppid=200 2024-12-11T06:15:21,487 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cd0db02aed95bd6781cec37ad5803637 in 166 msec 2024-12-11T06:15:21,487 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:15:21,488 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:15:21,489 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:15:21,489 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles
2024-12-11T06:15:21,489 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T06:15:21,490 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf
2024-12-11T06:15:21,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742277_1453 (size=68)
2024-12-11T06:15:21,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742277_1453 (size=68)
2024-12-11T06:15:21,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742277_1453 (size=68)
2024-12-11T06:15:21,496 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-11T06:15:21,496 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:21,496 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:21,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742278_1454 (size=673)
2024-12-11T06:15:21,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742278_1454 (size=673)
2024-12-11T06:15:21,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742278_1454 (size=673)
2024-12-11T06:15:21,506 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-11T06:15:21,509 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-11T06:15:21,510 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:21,511 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-11T06:15:21,511 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200
2024-12-11T06:15:21,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 206 msec
2024-12-11T06:15:21,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200
2024-12-11T06:15:21,626 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed
2024-12-11T06:15:21,634 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. with WAL disabled. Data may be lost in the event of a crash.
2024-12-11T06:15:21,636 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. with WAL disabled. Data may be lost in the event of a crash.
2024-12-11T06:15:21,638 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100
2024-12-11T06:15:21,640 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState
2024-12-11T06:15:21,640 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.
2024-12-11T06:15:21,640 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-12-11T06:15:21,642 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100
2024-12-11T06:15:21,646 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100
2024-12-11T06:15:21,651 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100
2024-12-11T06:15:21,653 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }
2024-12-11T06:15:21,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897721653 (current time:1733897721653).
2024-12-11T06:15:21,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-11T06:15:21,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2
2024-12-11T06:15:21,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot
2024-12-11T06:15:21,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42c30761, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-11T06:15:21,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id
2024-12-11T06:15:21,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-11T06:15:21,654 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0'
2024-12-11T06:15:21,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-11T06:15:21,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0"
2024-12-11T06:15:21,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62f1f6f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-11T06:15:21,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1]
2024-12-11T06:15:21,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-12-11T06:15:21,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:15:21,655 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46740, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-12-11T06:15:21,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b23b97a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-11T06:15:21,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-11T06:15:21,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1]
2024-12-11T06:15:21,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-11T06:15:21,658 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55704, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-11T06:15:21,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063.
2024-12-11T06:15:21,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-11T06:15:21,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:15:21,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:15:21,659 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-11T06:15:21,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48d7b943, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-11T06:15:21,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id
2024-12-11T06:15:21,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-11T06:15:21,660 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0'
2024-12-11T06:15:21,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-11T06:15:21,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0"
2024-12-11T06:15:21,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f562537, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-11T06:15:21,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1]
2024-12-11T06:15:21,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false
2024-12-11T06:15:21,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:15:21,661 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46754, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService
2024-12-11T06:15:21,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bb72f81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-11T06:15:21,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry
2024-12-11T06:15:21,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1]
2024-12-11T06:15:21,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-11T06:15:21,664 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-11T06:15:21,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2]
2024-12-11T06:15:21,667 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063.
2024-12-11T06:15:21,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-11T06:15:21,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:15:21,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T06:15:21,667 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-11T06:15:21,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA]
2024-12-11T06:15:21,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
2024-12-11T06:15:21,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }
2024-12-11T06:15:21,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203
2024-12-11T06:15:21,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203
2024-12-11T06:15:21,669 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-11T06:15:21,670 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-11T06:15:21,673 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-11T06:15:21,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742279_1455 (size=180)
2024-12-11T06:15:21,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742279_1455 (size=180)
2024-12-11T06:15:21,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742279_1455 (size=180)
2024-12-11T06:15:21,679 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-11T06:15:21,679 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd0db02aed95bd6781cec37ad5803637}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e787fa3b025c3182185cfac52b647d88}]
2024-12-11T06:15:21,680 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd0db02aed95bd6781cec37ad5803637
2024-12-11T06:15:21,680 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e787fa3b025c3182185cfac52b647d88
2024-12-11T06:15:21,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203
2024-12-11T06:15:21,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204
2024-12-11T06:15:21,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205
2024-12-11T06:15:21,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.
2024-12-11T06:15:21,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88.
2024-12-11T06:15:21,832 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing cd0db02aed95bd6781cec37ad5803637 1/1 column families, dataSize=132 B heapSize=544 B
2024-12-11T06:15:21,832 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing e787fa3b025c3182185cfac52b647d88 1/1 column families, dataSize=3.13 KB heapSize=7 KB
2024-12-11T06:15:21,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211ed11a6a5d45b4624a3c2f7539a4b87cd_cd0db02aed95bd6781cec37ad5803637 is 71, key is 0007093a260cfca220f7cbd734de65dc/cf:q/1733897721634/Put/seqid=0
2024-12-11T06:15:21,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211e1024efc698d4d079300781c70473f88_e787fa3b025c3182185cfac52b647d88 is 71, key is 1c0ec18ff9f08a81c003a48376be08a2/cf:q/1733897721636/Put/seqid=0
2024-12-11T06:15:21,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742280_1456 (size=5032)
2024-12-11T06:15:21,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742281_1457 (size=8241)
2024-12-11T06:15:21,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742281_1457 (size=8241)
2024-12-11T06:15:21,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742280_1456 (size=5032)
2024-12-11T06:15:21,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742280_1456 (size=5032)
2024-12-11T06:15:21,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742281_1457 (size=8241)
2024-12-11T06:15:21,863 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T06:15:21,863 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T06:15:21,867 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211ed11a6a5d45b4624a3c2f7539a4b87cd_cd0db02aed95bd6781cec37ad5803637 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241211ed11a6a5d45b4624a3c2f7539a4b87cd_cd0db02aed95bd6781cec37ad5803637
2024-12-11T06:15:21,867 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241211e1024efc698d4d079300781c70473f88_e787fa3b025c3182185cfac52b647d88 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241211e1024efc698d4d079300781c70473f88_e787fa3b025c3182185cfac52b647d88
2024-12-11T06:15:21,867 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/.tmp/cf/b9e883081be0433597e0431b22b68775, store: [table=testtb-testEmptyExportFileSystemState family=cf region=cd0db02aed95bd6781cec37ad5803637]
2024-12-11T06:15:21,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/.tmp/cf/dfb8103c8e66490087a2555ab25cd75b, store: [table=testtb-testEmptyExportFileSystemState family=cf region=e787fa3b025c3182185cfac52b647d88]
2024-12-11T06:15:21,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/.tmp/cf/b9e883081be0433597e0431b22b68775 is 214, key is 07a75847bdc751dc8b11bbf5438d8cec3/cf:q/1733897721634/Put/seqid=0
2024-12-11T06:15:21,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/.tmp/cf/dfb8103c8e66490087a2555ab25cd75b is 214, key is 1ebb48f82d28c086f77712cac73c83a18/cf:q/1733897721636/Put/seqid=0
2024-12-11T06:15:21,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742282_1458 (size=15447)
2024-12-11T06:15:21,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742282_1458 (size=15447)
2024-12-11T06:15:21,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742283_1459 (size=5724)
2024-12-11T06:15:21,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742282_1458 (size=15447)
2024-12-11T06:15:21,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742283_1459 (size=5724)
2024-12-11T06:15:21,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742283_1459 (size=5724)
2024-12-11T06:15:21,874 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/.tmp/cf/dfb8103c8e66490087a2555ab25cd75b
2024-12-11T06:15:21,875 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/.tmp/cf/b9e883081be0433597e0431b22b68775
2024-12-11T06:15:21,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/.tmp/cf/dfb8103c8e66490087a2555ab25cd75b as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/cf/dfb8103c8e66490087a2555ab25cd75b
2024-12-11T06:15:21,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/.tmp/cf/b9e883081be0433597e0431b22b68775 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/cf/b9e883081be0433597e0431b22b68775
2024-12-11T06:15:21,883 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/cf/b9e883081be0433597e0431b22b68775, entries=2, sequenceid=6, filesize=5.6 K
2024-12-11T06:15:21,883 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/cf/dfb8103c8e66490087a2555ab25cd75b, entries=48, sequenceid=6, filesize=15.1 K
2024-12-11T06:15:21,884 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for e787fa3b025c3182185cfac52b647d88 in 52ms, sequenceid=6, compaction requested=false
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState'
2024-12-11T06:15:21,884 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for cd0db02aed95bd6781cec37ad5803637 in 52ms, sequenceid=6, compaction requested=false
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for cd0db02aed95bd6781cec37ad5803637:
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for e787fa3b025c3182185cfac52b647d88:
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. for snaptb0-testEmptyExportFileSystemState completed.
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. for snaptb0-testEmptyExportFileSystemState completed.
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/cf/b9e883081be0433597e0431b22b68775] hfiles
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/cf/dfb8103c8e66490087a2555ab25cd75b] hfiles
2024-12-11T06:15:21,884 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/cf/b9e883081be0433597e0431b22b68775 for snapshot=snaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:21,885 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/cf/dfb8103c8e66490087a2555ab25cd75b for snapshot=snaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:21,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742285_1461 (size=115)
2024-12-11T06:15:21,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742285_1461 (size=115)
2024-12-11T06:15:21,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742285_1461 (size=115)
2024-12-11T06:15:21,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742284_1460 (size=115)
2024-12-11T06:15:21,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.
2024-12-11T06:15:21,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742284_1460 (size=115)
2024-12-11T06:15:21,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204
2024-12-11T06:15:21,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742284_1460 (size=115)
2024-12-11T06:15:21,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88.
2024-12-11T06:15:21,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205
2024-12-11T06:15:21,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=204
2024-12-11T06:15:21,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region cd0db02aed95bd6781cec37ad5803637
2024-12-11T06:15:21,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=205
2024-12-11T06:15:21,895 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region e787fa3b025c3182185cfac52b647d88
2024-12-11T06:15:21,895 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cd0db02aed95bd6781cec37ad5803637
2024-12-11T06:15:21,895 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e787fa3b025c3182185cfac52b647d88
2024-12-11T06:15:21,897 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cd0db02aed95bd6781cec37ad5803637 in 217 msec
2024-12-11T06:15:21,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203
2024-12-11T06:15:21,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e787fa3b025c3182185cfac52b647d88 in 217 msec
2024-12-11T06:15:21,898 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-11T06:15:21,898 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-11T06:15:21,899 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot.
2024-12-11T06:15:21,899 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles
2024-12-11T06:15:21,899 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T06:15:21,900 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241211e1024efc698d4d079300781c70473f88_e787fa3b025c3182185cfac52b647d88, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241211ed11a6a5d45b4624a3c2f7539a4b87cd_cd0db02aed95bd6781cec37ad5803637] hfiles
2024-12-11T06:15:21,900 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241211e1024efc698d4d079300781c70473f88_e787fa3b025c3182185cfac52b647d88
2024-12-11T06:15:21,900 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241211ed11a6a5d45b4624a3c2f7539a4b87cd_cd0db02aed95bd6781cec37ad5803637
2024-12-11T06:15:21,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742286_1462 (size=299)
2024-12-11T06:15:21,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742286_1462 (size=299)
2024-12-11T06:15:21,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742286_1462 (size=299)
2024-12-11T06:15:21,917 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-11T06:15:21,917 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:21,918 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:21,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742287_1463 (size=983)
2024-12-11T06:15:21,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742287_1463 (size=983)
2024-12-11T06:15:21,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742287_1463 (size=983)
2024-12-11T06:15:21,932 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-11T06:15:21,937 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-11T06:15:21,937 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:21,938 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-11T06:15:21,938 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203
2024-12-11T06:15:21,940 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 270 msec
2024-12-11T06:15:21,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203
2024-12-11T06:15:21,987 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed
2024-12-11T06:15:21,987 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897721987
2024-12-11T06:15:21,987 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:42173, tgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897721987, rawTgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897721987, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b
2024-12-11T06:15:22,016 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b
2024-12-11T06:15:22,016 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897721987, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897721987/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:22,017 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity.
2024-12-11T06:15:22,021 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897721987/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState
2024-12-11T06:15:22,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742288_1464 (size=185)
2024-12-11T06:15:22,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742288_1464 (size=185)
2024-12-11T06:15:22,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742288_1464 (size=185)
2024-12-11T06:15:22,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742289_1465 (size=673)
2024-12-11T06:15:22,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742289_1465 (size=673)
2024-12-11T06:15:22,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742289_1465 (size=673)
2024-12-11T06:15:22,032 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:15:22,032 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:15:22,032 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:15:23,046 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-3097565804710630943.jar
2024-12-11T06:15:23,047 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:15:23,047 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:15:23,113 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-16408285018426868521.jar
2024-12-11T06:15:23,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:15:23,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:15:23,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:15:23,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:15:23,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:15:23,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:15:23,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-11T06:15:23,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-11T06:15:23,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-11T06:15:23,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-11T06:15:23,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-11T06:15:23,117 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-11T06:15:23,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-11T06:15:23,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-11T06:15:23,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-11T06:15:23,118 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-11T06:15:23,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-11T06:15:23,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-11T06:15:23,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-11T06:15:23,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-11T06:15:23,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-11T06:15:23,120 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-11T06:15:23,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-11T06:15:23,121 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-11T06:15:23,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742290_1466 (size=131440)
2024-12-11T06:15:23,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742290_1466 (size=131440)
2024-12-11T06:15:23,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742290_1466 (size=131440)
2024-12-11T06:15:23,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742291_1467 (size=4188619)
2024-12-11T06:15:23,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742291_1467 (size=4188619)
2024-12-11T06:15:23,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742291_1467 (size=4188619)
2024-12-11T06:15:23,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742292_1468 (size=1323991)
2024-12-11T06:15:23,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742292_1468 (size=1323991)
2024-12-11T06:15:23,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742292_1468 (size=1323991)
2024-12-11T06:15:23,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742293_1469 (size=903542)
2024-12-11T06:15:23,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742293_1469 (size=903542)
2024-12-11T06:15:23,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742293_1469 (size=903542)
2024-12-11T06:15:23,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742294_1470 (size=8360360)
2024-12-11T06:15:23,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742294_1470 (size=8360360)
2024-12-11T06:15:23,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742294_1470 (size=8360360)
2024-12-11T06:15:23,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742295_1471 (size=1877034)
2024-12-11T06:15:23,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742295_1471 (size=1877034)
2024-12-11T06:15:23,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742295_1471 (size=1877034)
2024-12-11T06:15:23,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742296_1472 (size=77835)
2024-12-11T06:15:23,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742296_1472 (size=77835)
2024-12-11T06:15:23,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742296_1472 (size=77835)
2024-12-11T06:15:23,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742297_1473 (size=30949)
2024-12-11T06:15:23,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742297_1473 (size=30949)
2024-12-11T06:15:23,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742297_1473 (size=30949)
2024-12-11T06:15:23,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742298_1474 (size=1597213)
2024-12-11T06:15:23,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742298_1474 (size=1597213)
2024-12-11T06:15:23,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742298_1474 (size=1597213)
2024-12-11T06:15:23,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742299_1475 (size=4695811)
2024-12-11T06:15:23,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742299_1475 (size=4695811)
2024-12-11T06:15:23,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742299_1475 (size=4695811)
2024-12-11T06:15:23,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742300_1476 (size=232957)
2024-12-11T06:15:23,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742300_1476 (size=232957)
2024-12-11T06:15:23,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742300_1476 (size=232957)
2024-12-11T06:15:23,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:36519 is added to blk_1073742301_1477 (size=127628) 2024-12-11T06:15:23,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742301_1477 (size=127628) 2024-12-11T06:15:23,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742301_1477 (size=127628) 2024-12-11T06:15:23,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742302_1478 (size=20406) 2024-12-11T06:15:23,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742302_1478 (size=20406) 2024-12-11T06:15:23,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742302_1478 (size=20406) 2024-12-11T06:15:23,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742303_1479 (size=443172) 2024-12-11T06:15:23,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742303_1479 (size=443172) 2024-12-11T06:15:23,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742303_1479 (size=443172) 2024-12-11T06:15:23,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742304_1480 (size=5175431) 2024-12-11T06:15:23,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742304_1480 (size=5175431) 2024-12-11T06:15:23,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742304_1480 (size=5175431) 2024-12-11T06:15:23,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742305_1481 (size=217634) 2024-12-11T06:15:23,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742305_1481 (size=217634) 2024-12-11T06:15:23,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742305_1481 (size=217634) 2024-12-11T06:15:23,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742306_1482 (size=1832290) 2024-12-11T06:15:23,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742306_1482 (size=1832290) 2024-12-11T06:15:23,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742306_1482 (size=1832290) 2024-12-11T06:15:23,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742307_1483 (size=322274) 2024-12-11T06:15:23,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742307_1483 (size=322274) 2024-12-11T06:15:23,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37875 is added to blk_1073742307_1483 (size=322274) 2024-12-11T06:15:23,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742308_1484 (size=503880) 2024-12-11T06:15:23,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742308_1484 (size=503880) 2024-12-11T06:15:23,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742308_1484 (size=503880) 2024-12-11T06:15:23,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742309_1485 (size=29229) 2024-12-11T06:15:23,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742309_1485 (size=29229) 2024-12-11T06:15:23,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742309_1485 (size=29229) 2024-12-11T06:15:23,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742310_1486 (size=24096) 2024-12-11T06:15:23,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742310_1486 (size=24096) 2024-12-11T06:15:23,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742310_1486 (size=24096) 2024-12-11T06:15:23,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742311_1487 (size=111872) 2024-12-11T06:15:23,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742311_1487 (size=111872) 2024-12-11T06:15:23,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742311_1487 (size=111872) 2024-12-11T06:15:23,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742312_1488 (size=45609) 2024-12-11T06:15:23,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742312_1488 (size=45609) 2024-12-11T06:15:23,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742312_1488 (size=45609) 2024-12-11T06:15:23,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742313_1489 (size=6426220) 2024-12-11T06:15:23,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742313_1489 (size=6426220) 2024-12-11T06:15:23,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742313_1489 (size=6426220) 2024-12-11T06:15:23,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742314_1490 (size=136454) 2024-12-11T06:15:23,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33445 is added to blk_1073742314_1490 (size=136454) 2024-12-11T06:15:23,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742314_1490 (size=136454) 2024-12-11T06:15:23,897 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T06:15:23,899 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-11T06:15:23,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742315_1491 (size=7) 2024-12-11T06:15:23,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742315_1491 (size=7) 2024-12-11T06:15:23,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742315_1491 (size=7) 2024-12-11T06:15:23,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742316_1492 (size=10) 2024-12-11T06:15:23,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742316_1492 (size=10) 2024-12-11T06:15:23,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742316_1492 (size=10) 2024-12-11T06:15:23,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742317_1493 (size=303901) 2024-12-11T06:15:23,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742317_1493 (size=303901) 2024-12-11T06:15:23,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742317_1493 (size=303901) 2024-12-11T06:15:23,939 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:15:23,940 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:15:23,985 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0008_000001 (auth:SIMPLE) from 127.0.0.1:36650 2024-12-11T06:15:25,335 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:15:26,297 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
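The TableMapReduceUtil(972) entries further above record which jar was chosen for each dependency class while the job's classpath was being assembled. As a rough illustration of that class-to-jar lookup, here is a minimal, hypothetical sketch using only the JDK (it is not HBase's actual TableMapReduceUtil implementation):

    import java.net.URL;

    public class FindContainingJar {
        // Resolve the .class resource through the class's loader; classes packaged in a jar
        // resolve to URLs of the form jar:file:/path/to/foo.jar!/com/example/Foo.class
        static String jarFor(Class<?> klass) {
            ClassLoader cl = klass.getClassLoader() != null
                ? klass.getClassLoader() : ClassLoader.getSystemClassLoader();
            URL url = cl.getResource(klass.getName().replace('.', '/') + ".class");
            if (url == null) {
                return null;
            }
            String s = url.toString();
            if (s.startsWith("jar:file:")) {
                // Strip the "jar:file:" prefix and the "!/inner/path" suffix to get the jar path.
                return s.substring("jar:file:".length(), s.indexOf("!/"));
            }
            return s; // loaded from a directory or the jrt: runtime image, not a jar
        }

        public static void main(String[] args) {
            System.out.println(jarFor(FindContainingJar.class));
            System.out.println(jarFor(java.sql.Driver.class));
        }
    }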
2024-12-11T06:15:28,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-11T06:15:28,055 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-11T06:15:28,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-11T06:15:28,557 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-11T06:15:28,619 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-11T06:15:28,700 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-11T06:15:28,844 DEBUG [master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=20, reuseRatio=66.67% 2024-12-11T06:15:28,844 DEBUG [master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-11T06:15:30,057 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0008_000001 (auth:SIMPLE) from 127.0.0.1:54496 2024-12-11T06:15:30,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742318_1494 (size=349575) 2024-12-11T06:15:30,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742318_1494 (size=349575) 2024-12-11T06:15:30,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742318_1494 (size=349575) 2024-12-11T06:15:30,706 INFO [regionserver/b66397bb9319:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-11T06:15:30,706 INFO [regionserver/b66397bb9319:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-11T06:15:30,809 INFO [regionserver/b66397bb9319:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-11T06:15:31,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742319_1495 (size=8568) 2024-12-11T06:15:31,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742319_1495 (size=8568) 2024-12-11T06:15:31,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742319_1495 (size=8568) 2024-12-11T06:15:31,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742320_1496 (size=460) 2024-12-11T06:15:31,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742320_1496 (size=460) 2024-12-11T06:15:31,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742320_1496 (size=460) 2024-12-11T06:15:31,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742321_1497 (size=8568) 2024-12-11T06:15:31,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742321_1497 (size=8568) 2024-12-11T06:15:31,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742321_1497 (size=8568) 2024-12-11T06:15:31,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742322_1498 (size=349575) 2024-12-11T06:15:31,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742322_1498 (size=349575) 2024-12-11T06:15:31,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742322_1498 (size=349575) 2024-12-11T06:15:31,738 INFO [regionserver/b66397bb9319:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 159676 ms 2024-12-11T06:15:31,868 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 122c4bf679e9253b1a206af0572eb5cb changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:15:31,868 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region e787fa3b025c3182185cfac52b647d88 changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:15:31,868 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 84d6f5deb75d828a55d48a410e730336 changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:15:31,868 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region cd0db02aed95bd6781cec37ad5803637 changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:15:31,868 DEBUG [master/b66397bb9319:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-11T06:15:31,869 INFO [master/b66397bb9319:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-11T06:15:31,869 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 
2024-12-11T06:15:31,869 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:15:31,870 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 4 regions 2024-12-11T06:15:31,870 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 2 regions 2024-12-11T06:15:31,870 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:15:31,870 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:15:31,870 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:15:31,870 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:15:31,870 INFO [master/b66397bb9319:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:15:31,870 INFO [master/b66397bb9319:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:15:31,870 INFO [master/b66397bb9319:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:15:31,870 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-11T06:15:31,873 INFO [master/b66397bb9319:0.Chore.1 {}] balancer.StochasticLoadBalancer(370): Running balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.9999999999999999, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.33333333333333337, need balance); 2024-12-11T06:15:31,873 INFO [master/b66397bb9319:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.4947970366745957, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.5773502691896258, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.9999999999999999, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=1.0, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.33333333333333337, need balance); computedMaxSteps=14400 2024-12-11T06:15:32,046 INFO 
[master/b66397bb9319:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 175 ms to try 14400 different iterations. Found a solution that moves 2 regions; Going from a computed imbalance of 0.4947970366745957 to a new imbalance of 0.017568491615566242. funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.3333333333333333, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8157635019787899, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8503847134842856, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-11T06:15:32,050 INFO [master/b66397bb9319:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 2, the balance interval is 150000 ms, and the max number regions in transition is 6 2024-12-11T06:15:32,051 INFO [master/b66397bb9319:0.Chore.1 {}] master.HMaster(2172): balance hri=0c6631449afb0062d10d83de4eb96abd, source=b66397bb9319,35939,1733897428441, destination=b66397bb9319,44539,1733897428597 2024-12-11T06:15:32,052 DEBUG [master/b66397bb9319:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=0c6631449afb0062d10d83de4eb96abd, REOPEN/MOVE 2024-12-11T06:15:32,053 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=206, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=0c6631449afb0062d10d83de4eb96abd, REOPEN/MOVE 2024-12-11T06:15:32,054 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=0c6631449afb0062d10d83de4eb96abd, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:15:32,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=206, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=0c6631449afb0062d10d83de4eb96abd, REOPEN/MOVE because future has completed 2024-12-11T06:15:32,056 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:15:32,056 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0c6631449afb0062d10d83de4eb96abd, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:15:32,209 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] handler.UnassignRegionHandler(122): Close 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,210 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:15:32,210 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 
{event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HRegion(1722): Closing 0c6631449afb0062d10d83de4eb96abd, disabling compactions & flushes 2024-12-11T06:15:32,210 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HRegion(1755): Closing region hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:15:32,210 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:15:32,210 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. after waiting 0 ms 2024-12-11T06:15:32,210 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:15:32,210 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HRegion(2902): Flushing 0c6631449afb0062d10d83de4eb96abd 1/1 column families, dataSize=1.35 KB heapSize=3.23 KB 2024-12-11T06:15:32,228 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/.tmp/l/84b3e9d04f684b81a21fead5ab7d8e19 is 74, key is testtb-testEmptyExportFileSystemState/l:jenkins/1733897720896/Put/seqid=0 2024-12-11T06:15:32,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742323_1499 (size=5735) 2024-12-11T06:15:32,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742323_1499 (size=5735) 2024-12-11T06:15:32,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742323_1499 (size=5735) 2024-12-11T06:15:32,237 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.35 KB at sequenceid=26 (bloomFilter=false), to=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/.tmp/l/84b3e9d04f684b81a21fead5ab7d8e19 2024-12-11T06:15:32,241 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 84b3e9d04f684b81a21fead5ab7d8e19 2024-12-11T06:15:32,243 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/.tmp/l/84b3e9d04f684b81a21fead5ab7d8e19 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/l/84b3e9d04f684b81a21fead5ab7d8e19 2024-12-11T06:15:32,248 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, 
pid=207}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 84b3e9d04f684b81a21fead5ab7d8e19 2024-12-11T06:15:32,249 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/l/84b3e9d04f684b81a21fead5ab7d8e19, entries=12, sequenceid=26, filesize=5.6 K 2024-12-11T06:15:32,249 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HRegion(3140): Finished flush of dataSize ~1.35 KB/1384, heapSize ~3.21 KB/3288, currentSize=0 B/0 for 0c6631449afb0062d10d83de4eb96abd in 39ms, sequenceid=26, compaction requested=false 2024-12-11T06:15:32,253 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/recovered.edits/29.seqid, newMaxSeqId=29, maxSeqId=1 2024-12-11T06:15:32,254 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:15:32,254 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HRegion(1973): Closed hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:15:32,254 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HRegion(1676): Region close journal for 0c6631449afb0062d10d83de4eb96abd: Waiting for close lock at 1733897732210Running coprocessor pre-close hooks at 1733897732210Disabling compacts and flushes for region at 1733897732210Disabling writes for close at 1733897732210Obtaining lock to block concurrent updates at 1733897732210Preparing flush snapshotting stores in 0c6631449afb0062d10d83de4eb96abd at 1733897732210Finished memstore snapshotting hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., syncing WAL and waiting on mvcc, flushsize=dataSize=1384, getHeapSize=3288, getOffHeapSize=0, getCellsCount=22 at 1733897732210Flushing stores of hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 
at 1733897732211 (+1 ms)Flushing 0c6631449afb0062d10d83de4eb96abd/l: creating writer at 1733897732211Flushing 0c6631449afb0062d10d83de4eb96abd/l: appending metadata at 1733897732227 (+16 ms)Flushing 0c6631449afb0062d10d83de4eb96abd/l: closing flushed file at 1733897732227Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@237f70c4: reopening flushed file at 1733897732241 (+14 ms)Finished flush of dataSize ~1.35 KB/1384, heapSize ~3.21 KB/3288, currentSize=0 B/0 for 0c6631449afb0062d10d83de4eb96abd in 39ms, sequenceid=26, compaction requested=false at 1733897732250 (+9 ms)Writing region close event to WAL at 1733897732250Running coprocessor post-close hooks at 1733897732254 (+4 ms)Closed at 1733897732254 2024-12-11T06:15:32,255 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] regionserver.HRegionServer(3302): Adding 0c6631449afb0062d10d83de4eb96abd move to b66397bb9319,44539,1733897428597 record at close sequenceid=26 2024-12-11T06:15:32,258 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=207}] handler.UnassignRegionHandler(157): Closed 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,258 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=0c6631449afb0062d10d83de4eb96abd, regionState=CLOSED 2024-12-11T06:15:32,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=207, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0c6631449afb0062d10d83de4eb96abd, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:15:32,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-11T06:15:32,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseRegionProcedure 0c6631449afb0062d10d83de4eb96abd, server=b66397bb9319,35939,1733897428441 in 205 msec 2024-12-11T06:15:32,263 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=206, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=0c6631449afb0062d10d83de4eb96abd, REOPEN/MOVE; state=CLOSED, location=b66397bb9319,44539,1733897428597; forceNewPlan=false, retain=false 2024-12-11T06:15:32,414 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
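The StochasticLoadBalancer entries above list each cost function with its multiplier and imbalance, plus an overall "weighted average imbalance". That overall figure is consistent with a multiplier-weighted average of the individual imbalances; the short sketch below (not HBase code) reproduces the logged initial value from the numbers printed in the log, with functions marked "not needed" contributing nothing:

    public class WeightedImbalance {
        // Weighted average: sum(multiplier * imbalance) / sum(multiplier).
        static double weightedAverage(double[][] functions) {
            double weightedSum = 0.0, totalWeight = 0.0;
            for (double[] f : functions) {
                weightedSum += f[0] * f[1];
                totalWeight += f[0];
            }
            return totalWeight == 0.0 ? 0.0 : weightedSum / totalWeight;
        }

        public static void main(String[] args) {
            // {multiplier, imbalance} pairs copied from the balancer log entry above.
            double[][] costs = {
                {500.0, 0.5773502691896258}, // RegionCountSkewCostFunction
                {7.0, 0.0},                  // MoveCostFunction
                {25.0, 0.0},                 // ServerLocalityCostFunction
                {15.0, 0.0},                 // RackLocalityCostFunction
                {35.0, 0.0},                 // TableSkewCostFunction
                {5.0, 0.9999999999999999},   // ReadRequestCostFunction
                {5.0, 0.0},                  // CPRequestCostFunction
                {5.0, 1.0},                  // WriteRequestCostFunction
                {5.0, 0.0},                  // MemStoreSizeCostFunction
                {5.0, 0.33333333333333337},  // StoreFileCostFunction
            };
            // Prints approximately 0.4947970366745957, the logged initial imbalance.
            System.out.println(weightedAverage(costs));
        }
    }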
2024-12-11T06:15:32,415 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=0c6631449afb0062d10d83de4eb96abd, regionState=OPENING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:15:32,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=206, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=0c6631449afb0062d10d83de4eb96abd, REOPEN/MOVE because future has completed 2024-12-11T06:15:32,417 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=206, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0c6631449afb0062d10d83de4eb96abd, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:15:32,575 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] handler.AssignRegionHandler(132): Open hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:15:32,575 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(7752): Opening region: {ENCODED => 0c6631449afb0062d10d83de4eb96abd, NAME => 'hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd.', STARTKEY => '', ENDKEY => ''} 2024-12-11T06:15:32,575 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. service=AccessControlService 2024-12-11T06:15:32,576 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:15:32,576 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,576 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(898): Instantiated hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:32,576 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(7794): checking encryption for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,576 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(7797): checking classloading for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,584 INFO [StoreOpener-0c6631449afb0062d10d83de4eb96abd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,585 INFO [StoreOpener-0c6631449afb0062d10d83de4eb96abd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c6631449afb0062d10d83de4eb96abd columnFamilyName l 2024-12-11T06:15:32,585 DEBUG [StoreOpener-0c6631449afb0062d10d83de4eb96abd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:32,595 INFO [StoreFileOpener-0c6631449afb0062d10d83de4eb96abd-l-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 84b3e9d04f684b81a21fead5ab7d8e19 2024-12-11T06:15:32,595 DEBUG [StoreOpener-0c6631449afb0062d10d83de4eb96abd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/l/84b3e9d04f684b81a21fead5ab7d8e19 2024-12-11T06:15:32,595 INFO [StoreOpener-0c6631449afb0062d10d83de4eb96abd-1 {}] regionserver.HStore(327): Store=0c6631449afb0062d10d83de4eb96abd/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:15:32,595 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(1038): replaying wal for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,596 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 
{event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,598 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,599 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(1048): stopping wal replay for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,599 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(1060): Cleaning up temporary data for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,604 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(1093): writing seq id for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,605 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(1114): Opened 0c6631449afb0062d10d83de4eb96abd; next sequenceid=30; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64517378, jitterRate=-0.03861615061759949}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:15:32,605 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:15:32,609 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-11T06:15:32,610 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-11T06:15:32,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-11T06:15:32,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-11T06:15:32,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-11T06:15:32,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportExpiredSnapshot 2024-12-11T06:15:32,616 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:32,616 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:32,616 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:32,617 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:32,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:32,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:32,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:32,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:32,621 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:32,621 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:32,621 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:32,621 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:32,624 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegion(1006): Region open journal for 0c6631449afb0062d10d83de4eb96abd: Running coprocessor pre-open hook at 1733897732576Writing region info on filesystem at 1733897732576Initializing all the Stores at 1733897732577 (+1 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733897732577Cleaning up temporary data from old regions at 1733897732599 (+22 ms)Running coprocessor post-open hooks at 1733897732605 (+6 ms)Region opened successfully at 1733897732624 (+19 ms) 2024-12-11T06:15:32,625 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., pid=208, masterSystemTime=1733897732570 2024-12-11T06:15:32,628 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:15:32,628 INFO [RS_OPEN_PRIORITY_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=208}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:15:32,629 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=0c6631449afb0062d10d83de4eb96abd, regionState=OPEN, openSeqNum=30, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:15:32,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=206, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0c6631449afb0062d10d83de4eb96abd, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:15:32,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=206 2024-12-11T06:15:32,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=206, state=SUCCESS, hasLock=false; OpenRegionProcedure 0c6631449afb0062d10d83de4eb96abd, server=b66397bb9319,44539,1733897428597 in 215 msec 2024-12-11T06:15:32,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=0c6631449afb0062d10d83de4eb96abd, REOPEN/MOVE in 583 msec 2024-12-11T06:15:32,657 INFO [master/b66397bb9319:0.Chore.1 {}] master.HMaster(2172): balance hri=122c4bf679e9253b1a206af0572eb5cb, source=b66397bb9319,35939,1733897428441, destination=b66397bb9319,44539,1733897428597 2024-12-11T06:15:32,659 DEBUG [master/b66397bb9319:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=209, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=122c4bf679e9253b1a206af0572eb5cb, REOPEN/MOVE 2024-12-11T06:15:32,659 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure 
table=testExportExpiredSnapshot, region=122c4bf679e9253b1a206af0572eb5cb, REOPEN/MOVE 2024-12-11T06:15:32,660 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=122c4bf679e9253b1a206af0572eb5cb, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:15:32,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=122c4bf679e9253b1a206af0572eb5cb, REOPEN/MOVE because future has completed 2024-12-11T06:15:32,663 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:15:32,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 122c4bf679e9253b1a206af0572eb5cb, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:15:32,817 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:32,817 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:15:32,817 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 122c4bf679e9253b1a206af0572eb5cb, disabling compactions & flushes 2024-12-11T06:15:32,817 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:32,817 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:32,817 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. after waiting 0 ms 2024-12-11T06:15:32,817 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:32,821 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T06:15:32,822 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:15:32,822 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 
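The "balance hri=..., source=..., destination=..." entries and the TransitRegionStateProcedure REOPEN/MOVE steps that follow are the balancer-driven form of a region move. For reference, a minimal client-side sketch of the same operation (connection settings are assumptions; the encoded region name and destination server are copied from the log and are not meant to be re-run against this cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MoveRegionExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Encoded region name and destination as reported in the log entry
                // "balance hri=122c4bf679e9253b1a206af0572eb5cb, ..., destination=b66397bb9319,44539,1733897428597"
                byte[] encodedRegionName = Bytes.toBytes("122c4bf679e9253b1a206af0572eb5cb");
                ServerName destination = ServerName.valueOf("b66397bb9319,44539,1733897428597");
                admin.move(encodedRegionName, destination);
            }
        }
    }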
2024-12-11T06:15:32,822 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 122c4bf679e9253b1a206af0572eb5cb: Waiting for close lock at 1733897732817Running coprocessor pre-close hooks at 1733897732817Disabling compacts and flushes for region at 1733897732817Disabling writes for close at 1733897732817Writing region close event to WAL at 1733897732818 (+1 ms)Running coprocessor post-close hooks at 1733897732822 (+4 ms)Closed at 1733897732822 2024-12-11T06:15:32,822 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegionServer(3302): Adding 122c4bf679e9253b1a206af0572eb5cb move to b66397bb9319,44539,1733897428597 record at close sequenceid=5 2024-12-11T06:15:32,824 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:32,824 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=122c4bf679e9253b1a206af0572eb5cb, regionState=CLOSED 2024-12-11T06:15:32,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 122c4bf679e9253b1a206af0572eb5cb, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:15:32,829 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=209 2024-12-11T06:15:32,829 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 122c4bf679e9253b1a206af0572eb5cb, server=b66397bb9319,35939,1733897428441 in 164 msec 2024-12-11T06:15:32,831 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=209, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=122c4bf679e9253b1a206af0572eb5cb, REOPEN/MOVE; state=CLOSED, location=b66397bb9319,44539,1733897428597; forceNewPlan=false, retain=false 2024-12-11T06:15:32,982 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T06:15:32,982 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=122c4bf679e9253b1a206af0572eb5cb, regionState=OPENING, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:15:32,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=122c4bf679e9253b1a206af0572eb5cb, REOPEN/MOVE because future has completed 2024-12-11T06:15:32,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=209, state=RUNNABLE, hasLock=false; OpenRegionProcedure 122c4bf679e9253b1a206af0572eb5cb, server=b66397bb9319,44539,1733897428597}] 2024-12-11T06:15:33,139 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 
2024-12-11T06:15:33,139 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(7752): Opening region: {ENCODED => 122c4bf679e9253b1a206af0572eb5cb, NAME => 'testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:15:33,139 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. service=AccessControlService 2024-12-11T06:15:33,140 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:15:33,140 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:33,140 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:33,140 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(7794): checking encryption for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:33,140 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(7797): checking classloading for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:33,141 INFO [StoreOpener-122c4bf679e9253b1a206af0572eb5cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:33,142 INFO [StoreOpener-122c4bf679e9253b1a206af0572eb5cb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 122c4bf679e9253b1a206af0572eb5cb columnFamilyName cf 2024-12-11T06:15:33,143 DEBUG [StoreOpener-122c4bf679e9253b1a206af0572eb5cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:33,154 DEBUG [StoreOpener-122c4bf679e9253b1a206af0572eb5cb-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/cf/619f43f68487482c9afcab08155d889c 2024-12-11T06:15:33,154 INFO [StoreOpener-122c4bf679e9253b1a206af0572eb5cb-1 {}] regionserver.HStore(327): Store=122c4bf679e9253b1a206af0572eb5cb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:15:33,154 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(1038): replaying wal for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:33,155 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:33,156 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:33,157 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(1048): stopping wal replay for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:33,157 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(1060): Cleaning up temporary data for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:33,159 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(1093): writing seq id for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:33,160 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(1114): Opened 122c4bf679e9253b1a206af0572eb5cb; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75323996, jitterRate=0.12241500616073608}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:15:33,160 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:15:33,160 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegion(1006): Region open journal for 122c4bf679e9253b1a206af0572eb5cb: Running coprocessor pre-open hook at 1733897733140Writing region info on filesystem at 1733897733140Initializing all the Stores at 1733897733141 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897733141Cleaning up temporary data from old regions at 1733897733157 (+16 ms)Running coprocessor post-open hooks at 1733897733160 (+3 ms)Region opened successfully at 1733897733160 
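
Editor's note: the region open journal above prints a column family 'cf' with IS_MOB => 'true', MOB_THRESHOLD => '0', and VERSIONS => '1'. As a hedged sketch only (the table name below is a placeholder, and this is not the test's own code), an equivalent MOB-enabled schema could be declared through the public client API like this:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

/** Sketch only: create a table whose 'cf' family stores every cell as a MOB file. */
public class CreateMobTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every value is written to the MOB store
          .setMaxVersions(1)     // VERSIONS => '1'
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("example-mob-table")) // hypothetical table name
          .setColumnFamily(cf)
          .build();
      admin.createTable(table);
    }
  }
}
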
2024-12-11T06:15:33,161 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb., pid=211, masterSystemTime=1733897733136 2024-12-11T06:15:33,163 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:33,163 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=211}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:15:33,164 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=122c4bf679e9253b1a206af0572eb5cb, regionState=OPEN, openSeqNum=9, regionLocation=b66397bb9319,44539,1733897428597 2024-12-11T06:15:33,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=209, state=RUNNABLE, hasLock=false; OpenRegionProcedure 122c4bf679e9253b1a206af0572eb5cb, server=b66397bb9319,44539,1733897428597 because future has completed 2024-12-11T06:15:33,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=209 2024-12-11T06:15:33,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=209, state=SUCCESS, hasLock=false; OpenRegionProcedure 122c4bf679e9253b1a206af0572eb5cb, server=b66397bb9319,44539,1733897428597 in 184 msec 2024-12-11T06:15:33,172 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=122c4bf679e9253b1a206af0572eb5cb, REOPEN/MOVE in 513 msec 2024-12-11T06:15:33,259 DEBUG [master/b66397bb9319:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-11T06:15:33,260 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 0c6631449afb0062d10d83de4eb96abd changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:15:33,260 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 122c4bf679e9253b1a206af0572eb5cb changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:15:33,267 DEBUG [master/b66397bb9319:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T06:15:33,271 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testEmptyExportFileSystemState because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-11T06:15:33,271 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-11T06:15:33,285 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T06:15:33,285 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
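
Editor's note: the "Finalize the Snapshot Export" and "Verify the exported snapshot" entries above are emitted by org.apache.hadoop.hbase.snapshot.ExportSnapshot. As a hedged sketch only, that tool can be driven through Hadoop's ToolRunner; the snapshot name and destination URI below are placeholders, and the -snapshot/-copy-to option spellings follow the HBase reference guide rather than anything shown in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

/** Sketch only: export an existing snapshot to another filesystem location. */
public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder arguments; option names as documented for the ExportSnapshot tool.
    String[] toolArgs = new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://localhost:42173/user/jenkins/export-test/export-dest" // hypothetical destination
    };
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), toolArgs);
    System.exit(exitCode);
  }
}
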
2024-12-11T06:15:33,290 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T06:15:33,290 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T06:15:33,290 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T06:15:33,290 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T06:15:33,291 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-11T06:15:33,291 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-11T06:15:33,291 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897721987/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897721987/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T06:15:33,291 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897721987/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-11T06:15:33,291 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897721987/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-11T06:15:33,295 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-11T06:15:33,298 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897733297"}]},"ts":"1733897733297"} 2024-12-11T06:15:33,299 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-11T06:15:33,299 INFO [PEWorker-1 {}] 
procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-11T06:15:33,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=213, ppid=212, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-11T06:15:33,301 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=cd0db02aed95bd6781cec37ad5803637, UNASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e787fa3b025c3182185cfac52b647d88, UNASSIGN}] 2024-12-11T06:15:33,302 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e787fa3b025c3182185cfac52b647d88, UNASSIGN 2024-12-11T06:15:33,302 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=cd0db02aed95bd6781cec37ad5803637, UNASSIGN 2024-12-11T06:15:33,302 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=e787fa3b025c3182185cfac52b647d88, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:15:33,302 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=cd0db02aed95bd6781cec37ad5803637, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:15:33,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e787fa3b025c3182185cfac52b647d88, UNASSIGN because future has completed 2024-12-11T06:15:33,304 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:15:33,304 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=215, state=RUNNABLE, hasLock=false; CloseRegionProcedure e787fa3b025c3182185cfac52b647d88, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:15:33,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=cd0db02aed95bd6781cec37ad5803637, UNASSIGN because future has completed 2024-12-11T06:15:33,305 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:15:33,305 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=214, state=RUNNABLE, hasLock=false; CloseRegionProcedure cd0db02aed95bd6781cec37ad5803637, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:15:33,407 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-11T06:15:33,456 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] handler.UnassignRegionHandler(122): Close e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:33,456 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:15:33,456 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1722): Closing e787fa3b025c3182185cfac52b647d88, disabling compactions & flushes 2024-12-11T06:15:33,456 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:33,456 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:33,456 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. after waiting 0 ms 2024-12-11T06:15:33,456 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:33,458 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] handler.UnassignRegionHandler(122): Close cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:33,458 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:15:33,458 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1722): Closing cd0db02aed95bd6781cec37ad5803637, disabling compactions & flushes 2024-12-11T06:15:33,458 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 2024-12-11T06:15:33,458 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 2024-12-11T06:15:33,458 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. after waiting 0 ms 2024-12-11T06:15:33,458 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 
2024-12-11T06:15:33,460 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:15:33,461 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:15:33,461 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88. 2024-12-11T06:15:33,461 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] regionserver.HRegion(1676): Region close journal for e787fa3b025c3182185cfac52b647d88: Waiting for close lock at 1733897733456Running coprocessor pre-close hooks at 1733897733456Disabling compacts and flushes for region at 1733897733456Disabling writes for close at 1733897733456Writing region close event to WAL at 1733897733457 (+1 ms)Running coprocessor post-close hooks at 1733897733461 (+4 ms)Closed at 1733897733461 2024-12-11T06:15:33,462 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:15:33,462 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:15:33,462 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637. 
2024-12-11T06:15:33,462 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] regionserver.HRegion(1676): Region close journal for cd0db02aed95bd6781cec37ad5803637: Waiting for close lock at 1733897733458Running coprocessor pre-close hooks at 1733897733458Disabling compacts and flushes for region at 1733897733458Disabling writes for close at 1733897733458Writing region close event to WAL at 1733897733459 (+1 ms)Running coprocessor post-close hooks at 1733897733462 (+3 ms)Closed at 1733897733462 2024-12-11T06:15:33,462 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=216}] handler.UnassignRegionHandler(157): Closed e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:33,463 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=e787fa3b025c3182185cfac52b647d88, regionState=CLOSED 2024-12-11T06:15:33,464 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=217}] handler.UnassignRegionHandler(157): Closed cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:33,464 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=cd0db02aed95bd6781cec37ad5803637, regionState=CLOSED 2024-12-11T06:15:33,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=215, state=RUNNABLE, hasLock=false; CloseRegionProcedure e787fa3b025c3182185cfac52b647d88, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:15:33,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=214, state=RUNNABLE, hasLock=false; CloseRegionProcedure cd0db02aed95bd6781cec37ad5803637, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:15:33,467 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=215 2024-12-11T06:15:33,467 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=215, state=SUCCESS, hasLock=false; CloseRegionProcedure e787fa3b025c3182185cfac52b647d88, server=b66397bb9319,35939,1733897428441 in 161 msec 2024-12-11T06:15:33,468 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=e787fa3b025c3182185cfac52b647d88, UNASSIGN in 166 msec 2024-12-11T06:15:33,468 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=214 2024-12-11T06:15:33,468 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=214, state=SUCCESS, hasLock=false; CloseRegionProcedure cd0db02aed95bd6781cec37ad5803637, server=b66397bb9319,42579,1733897428663 in 161 msec 2024-12-11T06:15:33,470 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=213 2024-12-11T06:15:33,470 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=cd0db02aed95bd6781cec37ad5803637, UNASSIGN in 167 msec 2024-12-11T06:15:33,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=213, resume processing ppid=212 2024-12-11T06:15:33,472 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=213, ppid=212, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 170 msec 2024-12-11T06:15:33,473 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897733473"}]},"ts":"1733897733473"} 2024-12-11T06:15:33,474 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-11T06:15:33,474 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-11T06:15:33,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 179 msec 2024-12-11T06:15:33,558 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:15:33,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-11T06:15:33,617 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T06:15:33,617 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,619 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=218, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,621 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=218, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,623 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35939 {}] ipc.CallRunner(138): callId: 389 service: ClientService methodName: Mutate size: 167 connection: 172.17.0.2:38969 deadline: 1733897793620, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b66397bb9319 port=44539 startCode=1733897428597. As of locationSeqNum=26. 
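
Editor's note: the entries above and immediately below record a client disabling and then deleting testtb-testEmptyExportFileSystemState, followed by deletion of its two snapshots. A minimal sketch of the equivalent Admin calls, assuming a standard client Configuration (this is an illustration, not the test's code):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/** Sketch only: tear down a test table and its snapshots, mirroring the log above. */
public class DropTableAndSnapshotsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      if (admin.tableExists(table)) {
        // The master runs a DisableTableProcedure for this call (pid=212 above).
        admin.disableTable(table);
        // The master runs a DeleteTableProcedure, archiving region and MOB files (pid=218 above).
        admin.deleteTable(table);
      }
      // Snapshot names as they appear later in this section of the log.
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}
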
2024-12-11T06:15:33,624 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:33,624 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:33,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2 , the old value is region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b66397bb9319 port=44539 startCode=1733897428597. As of locationSeqNum=26. 2024-12-11T06:15:33,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b66397bb9319 port=44539 startCode=1733897428597. As of locationSeqNum=26. 2024-12-11T06:15:33,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncRegionLocatorHelper(84): Try updating region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,35939,1733897428441, seqNum=2 with the new location region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,44539,1733897428597, seqNum=26 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=b66397bb9319 port=44539 startCode=1733897428597. As of locationSeqNum=26. 
2024-12-11T06:15:33,626 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/recovered.edits] 2024-12-11T06:15:33,626 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/recovered.edits] 2024-12-11T06:15:33,629 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/cf/dfb8103c8e66490087a2555ab25cd75b to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/cf/dfb8103c8e66490087a2555ab25cd75b 2024-12-11T06:15:33,629 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/cf/b9e883081be0433597e0431b22b68775 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/cf/b9e883081be0433597e0431b22b68775 2024-12-11T06:15:33,632 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637/recovered.edits/9.seqid 2024-12-11T06:15:33,632 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88/recovered.edits/9.seqid 2024-12-11T06:15:33,632 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:33,632 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testEmptyExportFileSystemState/e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:33,632 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-11T06:15:33,633 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-11T06:15:33,633 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-12-11T06:15:33,636 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241211e1024efc698d4d079300781c70473f88_e787fa3b025c3182185cfac52b647d88 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241211e1024efc698d4d079300781c70473f88_e787fa3b025c3182185cfac52b647d88 2024-12-11T06:15:33,636 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241211ed11a6a5d45b4624a3c2f7539a4b87cd_cd0db02aed95bd6781cec37ad5803637 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241211ed11a6a5d45b4624a3c2f7539a4b87cd_cd0db02aed95bd6781cec37ad5803637 2024-12-11T06:15:33,637 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-11T06:15:33,639 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=218, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,641 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-11T06:15:33,643 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-11T06:15:33,644 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=218, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,644 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testEmptyExportFileSystemState' from region states. 
2024-12-11T06:15:33,644 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897733644"}]},"ts":"9223372036854775807"} 2024-12-11T06:15:33,644 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897733644"}]},"ts":"9223372036854775807"} 2024-12-11T06:15:33,646 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T06:15:33,646 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => cd0db02aed95bd6781cec37ad5803637, NAME => 'testtb-testEmptyExportFileSystemState,,1733897720133.cd0db02aed95bd6781cec37ad5803637.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e787fa3b025c3182185cfac52b647d88, NAME => 'testtb-testEmptyExportFileSystemState,1,1733897720133.e787fa3b025c3182185cfac52b647d88.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T06:15:33,646 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-12-11T06:15:33,646 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897733646"}]},"ts":"9223372036854775807"} 2024-12-11T06:15:33,648 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-11T06:15:33,648 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=218, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,649 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 31 msec 2024-12-11T06:15:33,736 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:33,738 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33615, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:33,739 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44539 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:33,740 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44539 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:33,740 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44539 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:33,741 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44459, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-11T06:15:33,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The 
fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,44539,1733897428597, seqNum=30] 2024-12-11T06:15:33,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:33,743 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54727, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-11T06:15:33,745 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44539 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,749 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T06:15:33,749 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T06:15:33,749 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T06:15:33,749 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-11T06:15:33,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:33,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:33,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:33,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-11T06:15:33,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,752 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:33,752 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-11T06:15:33,752 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-11T06:15:33,752 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:33,752 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:33,753 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:33,753 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:33,758 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-11T06:15:33,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-11T06:15:33,760 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 
{}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-11T06:15:33,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-11T06:15:33,787 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=817 (was 806) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-494827074_1 at /127.0.0.1:36962 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36251 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:36251 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:48016 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:45391 from appattempt_1733897436125_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:43345 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-7344 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:40196 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 20067) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:36936 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-494827074_1 at /127.0.0.1:48042 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=801 (was 773) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=735 (was 720) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=1303 (was 2126) 2024-12-11T06:15:33,787 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-12-11T06:15:33,805 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=817, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=735, ProcessCount=17, AvailableMemoryMB=1303 2024-12-11T06:15:33,805 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-12-11T06:15:33,806 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:15:33,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=219, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-11T06:15:33,808 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=219, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:15:33,808 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 219 2024-12-11T06:15:33,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=219 2024-12-11T06:15:33,809 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=219, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:15:33,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742324_1500 (size=440) 2024-12-11T06:15:33,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742324_1500 (size=440) 2024-12-11T06:15:33,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to 
blk_1073742324_1500 (size=440) 2024-12-11T06:15:33,817 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 72540bc724c1dade0fb1e882306b217f, NAME => 'testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:15:33,818 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 2a7c8dd9fcbbca25b42780a0d7db509d, NAME => 'testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:15:33,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742325_1501 (size=65) 2024-12-11T06:15:33,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742326_1502 (size=65) 2024-12-11T06:15:33,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742325_1501 (size=65) 2024-12-11T06:15:33,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742326_1502 (size=65) 2024-12-11T06:15:33,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742326_1502 (size=65) 2024-12-11T06:15:33,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742325_1501 (size=65) 2024-12-11T06:15:33,831 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:33,831 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 72540bc724c1dade0fb1e882306b217f, disabling compactions & flushes 2024-12-11T06:15:33,831 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:15:33,831 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:15:33,831 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. after waiting 0 ms 2024-12-11T06:15:33,831 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:15:33,831 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:15:33,832 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 72540bc724c1dade0fb1e882306b217f: Waiting for close lock at 1733897733831Disabling compacts and flushes for region at 1733897733831Disabling writes for close at 1733897733831Writing region close event to WAL at 1733897733831Closed at 1733897733831 2024-12-11T06:15:33,832 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:33,832 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 2a7c8dd9fcbbca25b42780a0d7db509d, disabling compactions & flushes 2024-12-11T06:15:33,832 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 2024-12-11T06:15:33,832 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 2024-12-11T06:15:33,832 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. after waiting 0 ms 2024-12-11T06:15:33,832 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 2024-12-11T06:15:33,832 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 
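The entries above trace the master-side CreateTableProcedure (pid=219) for 'testtb-testExportWithChecksum': the FS layout is written, the two initial regions are instantiated with a split at key '1', then closed again before the ADD_TO_META step that follows. For illustration only, here is a minimal client-side sketch of a request that would drive an equivalent procedure, assuming the HBase 2.x+ Admin API; the table name, family name, and split key are taken from the log, everything else is an assumption and not the test's actual code:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Column family 'cf' mirrors the descriptor logged by HMaster above:
      // IS_MOB => 'true', MOB_THRESHOLD => '0' (every cell goes to a MOB file), VERSIONS => '1'.
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)
                  .setMobThreshold(0L)
                  .setMaxVersions(1)
                  .build());
      // A single split key '1' yields the two regions seen above: ['', '1') and ['1', '').
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(table.build(), splitKeys);
    }
  }
}
```

With MOB_THRESHOLD at 0 effectively every cell of 'cf' is written out as a MOB file, which appears to be the point of using this descriptor in the MOB export tests recorded here.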
2024-12-11T06:15:33,832 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 2a7c8dd9fcbbca25b42780a0d7db509d: Waiting for close lock at 1733897733832Disabling compacts and flushes for region at 1733897733832Disabling writes for close at 1733897733832Writing region close event to WAL at 1733897733832Closed at 1733897733832 2024-12-11T06:15:33,833 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=219, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:15:33,833 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733897733833"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897733833"}]},"ts":"1733897733833"} 2024-12-11T06:15:33,833 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733897733833"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897733833"}]},"ts":"1733897733833"} 2024-12-11T06:15:33,835 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T06:15:33,836 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=219, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:15:33,836 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897733836"}]},"ts":"1733897733836"} 2024-12-11T06:15:33,837 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-11T06:15:33,838 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:15:33,839 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:15:33,839 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:15:33,839 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:15:33,839 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:15:33,839 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:15:33,839 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:15:33,839 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:15:33,839 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:15:33,839 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:15:33,839 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:15:33,839 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=220, ppid=219, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=72540bc724c1dade0fb1e882306b217f, ASSIGN}, {pid=221, ppid=219, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a7c8dd9fcbbca25b42780a0d7db509d, ASSIGN}] 2024-12-11T06:15:33,840 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=219, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=72540bc724c1dade0fb1e882306b217f, ASSIGN 2024-12-11T06:15:33,840 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=221, ppid=219, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a7c8dd9fcbbca25b42780a0d7db509d, ASSIGN 2024-12-11T06:15:33,841 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=220, ppid=219, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=72540bc724c1dade0fb1e882306b217f, ASSIGN; state=OFFLINE, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:15:33,841 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=221, ppid=219, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a7c8dd9fcbbca25b42780a0d7db509d, ASSIGN; state=OFFLINE, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:15:33,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=219 2024-12-11T06:15:33,991 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-11T06:15:33,992 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=221 updating hbase:meta row=2a7c8dd9fcbbca25b42780a0d7db509d, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:15:33,992 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=220 updating hbase:meta row=72540bc724c1dade0fb1e882306b217f, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:15:33,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=221, ppid=219, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a7c8dd9fcbbca25b42780a0d7db509d, ASSIGN because future has completed 2024-12-11T06:15:33,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:15:33,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=220, ppid=219, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=72540bc724c1dade0fb1e882306b217f, ASSIGN because future has completed 2024-12-11T06:15:33,998 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=223, ppid=220, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72540bc724c1dade0fb1e882306b217f, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:15:34,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=219 2024-12-11T06:15:34,152 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 2024-12-11T06:15:34,152 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(7752): Opening region: {ENCODED => 2a7c8dd9fcbbca25b42780a0d7db509d, NAME => 'testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:15:34,152 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:15:34,152 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. service=AccessControlService 2024-12-11T06:15:34,152 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(7752): Opening region: {ENCODED => 72540bc724c1dade0fb1e882306b217f, NAME => 'testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:15:34,152 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
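At this point the ASSIGN subprocedures (pid=220/221) have been dispatched and both region servers are executing OpenRegionProcedure (pid=222/223), with RegionStateStore moving the rows to OPENING and then OPEN. A small sketch, again only illustrative and assuming the standard HBase 2.x+ client API, of how a client could list the resulting assignments once the create call returns (only the table name is taken from the log):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class CheckAssignmentSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(tn)) {
      // Scans hbase:meta and prints each region with its assigned server, mirroring the
      // regionState=OPEN / regionLocation=... rows written by RegionStateStore above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```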
2024-12-11T06:15:34,153 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. service=AccessControlService 2024-12-11T06:15:34,153 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,153 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-11T06:15:34,153 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:34,153 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,153 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(7794): checking encryption for 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,153 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:15:34,153 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(7797): checking classloading for 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,153 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(7794): checking encryption for 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,153 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(7797): checking classloading for 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,154 INFO [StoreOpener-2a7c8dd9fcbbca25b42780a0d7db509d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,154 INFO [StoreOpener-72540bc724c1dade0fb1e882306b217f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,155 INFO [StoreOpener-2a7c8dd9fcbbca25b42780a0d7db509d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a7c8dd9fcbbca25b42780a0d7db509d columnFamilyName cf 2024-12-11T06:15:34,155 INFO [StoreOpener-72540bc724c1dade0fb1e882306b217f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72540bc724c1dade0fb1e882306b217f columnFamilyName cf 2024-12-11T06:15:34,157 DEBUG [StoreOpener-72540bc724c1dade0fb1e882306b217f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:34,157 INFO [StoreOpener-72540bc724c1dade0fb1e882306b217f-1 {}] regionserver.HStore(327): Store=72540bc724c1dade0fb1e882306b217f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:15:34,157 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(1038): replaying wal for 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,158 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,158 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,158 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(1048): stopping wal replay for 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,158 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(1060): Cleaning up temporary data for 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,159 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(1093): writing seq id for 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,160 DEBUG [StoreOpener-2a7c8dd9fcbbca25b42780a0d7db509d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:34,160 INFO [StoreOpener-2a7c8dd9fcbbca25b42780a0d7db509d-1 {}] regionserver.HStore(327): Store=2a7c8dd9fcbbca25b42780a0d7db509d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:15:34,161 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(1038): replaying wal for 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,161 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:15:34,161 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(1114): Opened 72540bc724c1dade0fb1e882306b217f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74431261, jitterRate=0.1091122180223465}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:15:34,161 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,161 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,162 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,162 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegion(1006): Region open journal for 72540bc724c1dade0fb1e882306b217f: Running coprocessor pre-open hook at 1733897734153Writing region info on filesystem at 1733897734153Initializing all the Stores at 1733897734154 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897734154Cleaning up temporary data from old regions at 1733897734158 (+4 ms)Running coprocessor post-open hooks at 1733897734161 (+3 ms)Region opened successfully at 1733897734162 (+1 ms) 2024-12-11T06:15:34,162 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(1048): stopping wal replay for 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,162 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(1060): Cleaning up 
temporary data for 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,162 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f., pid=223, masterSystemTime=1733897734150 2024-12-11T06:15:34,163 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(1093): writing seq id for 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,164 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:15:34,164 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=223}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:15:34,165 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:15:34,165 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=220 updating hbase:meta row=72540bc724c1dade0fb1e882306b217f, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:15:34,165 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(1114): Opened 2a7c8dd9fcbbca25b42780a0d7db509d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75182287, jitterRate=0.12030337750911713}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:15:34,165 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,165 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegion(1006): Region open journal for 2a7c8dd9fcbbca25b42780a0d7db509d: Running coprocessor pre-open hook at 1733897734153Writing region info on filesystem at 1733897734153Initializing all the Stores at 1733897734154 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897734154Cleaning up temporary data from old regions at 1733897734162 (+8 ms)Running coprocessor post-open hooks at 1733897734165 (+3 ms)Region opened successfully at 1733897734165 2024-12-11T06:15:34,166 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d., pid=222, masterSystemTime=1733897734149 2024-12-11T06:15:34,166 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=223, ppid=220, state=RUNNABLE, hasLock=false; OpenRegionProcedure 72540bc724c1dade0fb1e882306b217f, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:15:34,167 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 2024-12-11T06:15:34,167 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=222}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 2024-12-11T06:15:34,168 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=221 updating hbase:meta row=2a7c8dd9fcbbca25b42780a0d7db509d, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:15:34,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=223, resume processing ppid=220 2024-12-11T06:15:34,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=220, state=SUCCESS, hasLock=false; OpenRegionProcedure 72540bc724c1dade0fb1e882306b217f, server=b66397bb9319,35939,1733897428441 in 169 msec 2024-12-11T06:15:34,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=219, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=72540bc724c1dade0fb1e882306b217f, ASSIGN in 330 msec 2024-12-11T06:15:34,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=222, ppid=221, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:15:34,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=222, resume processing ppid=221 2024-12-11T06:15:34,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; OpenRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d, server=b66397bb9319,42579,1733897428663 in 174 msec 2024-12-11T06:15:34,173 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=221, resume processing ppid=219 2024-12-11T06:15:34,173 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, ppid=219, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a7c8dd9fcbbca25b42780a0d7db509d, ASSIGN in 333 msec 2024-12-11T06:15:34,174 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=219, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:15:34,174 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897734174"}]},"ts":"1733897734174"} 2024-12-11T06:15:34,176 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-11T06:15:34,176 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=219, 
state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:15:34,176 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-11T06:15:34,179 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44539 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-11T06:15:34,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:34,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:34,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:34,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:15:34,183 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:34,183 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:34,184 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:34,184 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:34,184 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:34,184 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:34,184 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 
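The CREATE_TABLE_POST_OPERATION step stores the creator's grant ('jenkins: RWXCA') under the table's row key in hbase:acl, and each ZKPermissionWatcher then refreshes its cache from the /hbase/acl znode, as the entries above show. A hedged sketch of reading that grant back through the public client API; this assumes AccessControlClient is available and the AccessController coprocessor is enabled (as it is in this secure test), and the string argument is simply the exact table name used as a regex:

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ReadAclSketch {
  // AccessControlClient.getUserPermissions declares "throws Throwable".
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Returns the entries stored under rowKey 'testtb-testExportWithChecksum' in hbase:acl,
      // i.e. the "jenkins: RWXCA" grant logged by PermissionStorage above.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportWithChecksum");
      perms.forEach(System.out::println);
    }
  }
}
```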
2024-12-11T06:15:34,184 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-11T06:15:34,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 377 msec 2024-12-11T06:15:34,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=219 2024-12-11T06:15:34,437 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T06:15:34,437 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T06:15:34,439 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-11T06:15:34,440 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:15:34,440 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:15:34,441 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T06:15:34,446 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T06:15:34,450 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T06:15:34,452 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T06:15:34,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897734452 (current time:1733897734452). 
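The last entries above show the client requesting a FLUSH-type snapshot named emptySnaptb0-testExportWithChecksum, while the very first entry of this section showed the previous test's snapshot being deleted. A minimal sketch of the corresponding Admin calls, assuming the HBase 2.x+ client API; the snapshot and table names come from the log, but this is not the test's actual code:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure completes; with type FLUSH the
      // online regions are flushed first, matching the { ss=... type=FLUSH ttl=0 } request above.
      admin.snapshot("emptySnaptb0-testExportWithChecksum", tn, SnapshotType.FLUSH);

      // Counterpart of the delete request seen at the top of this section for the previous test.
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}
```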
2024-12-11T06:15:34,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:15:34,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-11T06:15:34,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:15:34,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3db9f833, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:34,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:34,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:34,454 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:34,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:34,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:34,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ded4a03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:34,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:34,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:34,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,455 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33638, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:34,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74c0b499, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:34,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:34,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:34,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:34,457 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51744, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:34,458 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 2024-12-11T06:15:34,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:34,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,458 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:15:34,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6de7aa07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:34,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:34,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:34,460 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:34,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:34,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:34,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d4e7ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:34,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:34,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:34,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,461 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:34,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11c8c7d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:34,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:34,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:34,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:34,463 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51748, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
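For context, testExportWithChecksum will eventually export a snapshot and verify file checksums on the copy. A rough sketch of driving the stock ExportSnapshot tool programmatically, under the assumption that the hbase-mapreduce artifact is on the classpath; the destination URI and mapper count are placeholders (not taken from the log), and the option names should be checked against the tool's --help output for the version in use (checksum verification is on by default and can be disabled with --no-checksum-verify):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest plus the referenced HFiles/MOB files to the target
    // FileSystem; the destination URI below is purely illustrative.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "emptySnaptb0-testExportWithChecksum",
        "--copy-to", "hdfs://backup-cluster:8020/hbase",
        "--mappers", "2"
    });
    System.exit(rc);
  }
}
```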
2024-12-11T06:15:34,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,44539,1733897428597, seqNum=30] 2024-12-11T06:15:34,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:34,465 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:34,466 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 2024-12-11T06:15:34,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:34,467 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,467 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:15:34,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-11T06:15:34,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T06:15:34,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T06:15:34,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 224 2024-12-11T06:15:34,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-11T06:15:34,469 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:15:34,470 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:15:34,472 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:15:34,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742327_1503 (size=161) 2024-12-11T06:15:34,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742327_1503 (size=161) 2024-12-11T06:15:34,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742327_1503 (size=161) 2024-12-11T06:15:34,478 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:15:34,478 INFO [PEWorker-5 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72540bc724c1dade0fb1e882306b217f}, {pid=226, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d}] 2024-12-11T06:15:34,479 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=226, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,479 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=225, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-11T06:15:34,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=225 2024-12-11T06:15:34,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=226 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HRegion(2603): Flush status journal for 2a7c8dd9fcbbca25b42780a0d7db509d: 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HRegion(2603): Flush status journal for 72540bc724c1dade0fb1e882306b217f: 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. for emptySnaptb0-testExportWithChecksum completed. 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. for emptySnaptb0-testExportWithChecksum completed. 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:15:34,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:15:34,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742328_1504 (size=68) 2024-12-11T06:15:34,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742328_1504 (size=68) 2024-12-11T06:15:34,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742328_1504 (size=68) 2024-12-11T06:15:34,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:15:34,642 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=225 2024-12-11T06:15:34,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=225 2024-12-11T06:15:34,642 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,643 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=225, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,645 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 72540bc724c1dade0fb1e882306b217f in 165 msec 2024-12-11T06:15:34,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742329_1505 (size=68) 2024-12-11T06:15:34,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742329_1505 (size=68) 2024-12-11T06:15:34,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742329_1505 (size=68) 2024-12-11T06:15:34,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 
2024-12-11T06:15:34,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=226 2024-12-11T06:15:34,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=226 2024-12-11T06:15:34,650 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,651 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=226, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,652 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-12-11T06:15:34,653 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d in 173 msec 2024-12-11T06:15:34,653 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:15:34,653 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:15:34,654 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:15:34,654 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:15:34,654 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:34,655 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T06:15:34,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742330_1506 (size=60) 2024-12-11T06:15:34,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742330_1506 (size=60) 2024-12-11T06:15:34,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742330_1506 (size=60) 2024-12-11T06:15:34,661 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:15:34,661 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-11T06:15:34,662 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-11T06:15:34,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742331_1507 (size=641) 2024-12-11T06:15:34,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742331_1507 (size=641) 2024-12-11T06:15:34,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742331_1507 (size=641) 2024-12-11T06:15:34,670 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:15:34,674 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:15:34,675 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-11T06:15:34,676 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:15:34,676 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 224 2024-12-11T06:15:34,677 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 209 msec 2024-12-11T06:15:34,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-11T06:15:34,787 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T06:15:34,792 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:15:34,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:15:34,796 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T06:15:34,798 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-11T06:15:34,798 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 
2024-12-11T06:15:34,798 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:15:34,800 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T06:15:34,804 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T06:15:34,809 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-11T06:15:34,811 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T06:15:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897734811 (current time:1733897734811). 2024-12-11T06:15:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:15:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-11T06:15:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:15:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f8cc9ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:34,812 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:34,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:34,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:34,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27d2d068, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-11T06:15:34,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:34,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:34,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,813 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33662, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:34,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@562ca283, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:34,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:34,815 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:34,815 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:34,816 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51762, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:34,817 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:15:34,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:34,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,817 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:15:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26ef9563, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:15:34,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:15:34,818 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:15:34,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:15:34,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:15:34,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@300a57a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:34,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:15:34,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:15:34,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,819 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33684, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:15:34,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1178db80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:15:34,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:15:34,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:15:34,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:34,822 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51764, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:34,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,44539,1733897428597, seqNum=30] 2024-12-11T06:15:34,823 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:15:34,824 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39718, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:15:34,825 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:15:34,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:15:34,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:15:34,825 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:15:34,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-11T06:15:34,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-11T06:15:34,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-11T06:15:34,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 227 2024-12-11T06:15:34,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-11T06:15:34,827 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=227, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:15:34,828 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=227, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:15:34,830 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=227, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:15:34,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742332_1508 (size=156) 2024-12-11T06:15:34,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742332_1508 (size=156) 2024-12-11T06:15:34,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742332_1508 (size=156) 2024-12-11T06:15:34,836 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=227, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:15:34,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72540bc724c1dade0fb1e882306b217f}, {pid=229, ppid=227, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d}] 2024-12-11T06:15:34,837 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=229, ppid=227, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:34,837 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=228, ppid=227, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:34,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-11T06:15:34,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=229 2024-12-11T06:15:34,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=228 2024-12-11T06:15:34,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 2024-12-11T06:15:34,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:15:34,989 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] regionserver.HRegion(2902): Flushing 72540bc724c1dade0fb1e882306b217f 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-11T06:15:34,989 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] regionserver.HRegion(2902): Flushing 2a7c8dd9fcbbca25b42780a0d7db509d 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-11T06:15:35,006 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f is 71, key is 03e021864639f03720bdcc713e9fdd88/cf:q/1733897734791/Put/seqid=0 2024-12-11T06:15:35,010 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d is 71, key is 1bc32879edeacd4ca8a90118e9791cf9/cf:q/1733897734795/Put/seqid=0 2024-12-11T06:15:35,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742333_1509 (size=5171) 2024-12-11T06:15:35,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742333_1509 (size=5171) 2024-12-11T06:15:35,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742333_1509 (size=5171) 2024-12-11T06:15:35,012 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:35,016 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:35,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/.tmp/cf/5bb030151b8e4719bebb2ad86942eeb3, store: [table=testtb-testExportWithChecksum family=cf region=72540bc724c1dade0fb1e882306b217f] 2024-12-11T06:15:35,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/.tmp/cf/5bb030151b8e4719bebb2ad86942eeb3 is 206, key is 06e4f2304709250385a42656892e2d27c/cf:q/1733897734791/Put/seqid=0 2024-12-11T06:15:35,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742334_1510 (size=8101) 2024-12-11T06:15:35,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742334_1510 (size=8101) 2024-12-11T06:15:35,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742335_1511 (size=6106) 2024-12-11T06:15:35,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742334_1510 (size=8101) 2024-12-11T06:15:35,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742335_1511 (size=6106) 2024-12-11T06:15:35,033 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:35,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742335_1511 (size=6106) 2024-12-11T06:15:35,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/.tmp/cf/5bb030151b8e4719bebb2ad86942eeb3 2024-12-11T06:15:35,040 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:35,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/.tmp/cf/d95b4d3246a64301bc896c806ed04b58, store: [table=testtb-testExportWithChecksum family=cf region=2a7c8dd9fcbbca25b42780a0d7db509d] 2024-12-11T06:15:35,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/.tmp/cf/5bb030151b8e4719bebb2ad86942eeb3 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3 2024-12-11T06:15:35,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/.tmp/cf/d95b4d3246a64301bc896c806ed04b58 is 206, key is 1d3f75722a682e06c039b088bb6d1b014/cf:q/1733897734795/Put/seqid=0 2024-12-11T06:15:35,050 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3, entries=4, sequenceid=6, filesize=6.0 K 2024-12-11T06:15:35,051 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 72540bc724c1dade0fb1e882306b217f in 62ms, sequenceid=6, compaction requested=false 2024-12-11T06:15:35,051 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-11T06:15:35,052 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] regionserver.HRegion(2603): Flush status journal for 72540bc724c1dade0fb1e882306b217f: 2024-12-11T06:15:35,052 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. for snaptb0-testExportWithChecksum completed. 
2024-12-11T06:15:35,052 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-11T06:15:35,052 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:35,052 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3] hfiles 2024-12-11T06:15:35,052 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3 for snapshot=snaptb0-testExportWithChecksum 2024-12-11T06:15:35,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742336_1512 (size=14651) 2024-12-11T06:15:35,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742336_1512 (size=14651) 2024-12-11T06:15:35,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742336_1512 (size=14651) 2024-12-11T06:15:35,058 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/.tmp/cf/d95b4d3246a64301bc896c806ed04b58 2024-12-11T06:15:35,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/.tmp/cf/d95b4d3246a64301bc896c806ed04b58 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58 2024-12-11T06:15:35,067 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58, entries=46, sequenceid=6, filesize=14.3 K 2024-12-11T06:15:35,068 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 
2a7c8dd9fcbbca25b42780a0d7db509d in 79ms, sequenceid=6, compaction requested=false 2024-12-11T06:15:35,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] regionserver.HRegion(2603): Flush status journal for 2a7c8dd9fcbbca25b42780a0d7db509d: 2024-12-11T06:15:35,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. for snaptb0-testExportWithChecksum completed. 2024-12-11T06:15:35,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-11T06:15:35,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:15:35,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58] hfiles 2024-12-11T06:15:35,068 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58 for snapshot=snaptb0-testExportWithChecksum 2024-12-11T06:15:35,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742337_1513 (size=107) 2024-12-11T06:15:35,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742337_1513 (size=107) 2024-12-11T06:15:35,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742337_1513 (size=107) 2024-12-11T06:15:35,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 
2024-12-11T06:15:35,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=228}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=228 2024-12-11T06:15:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=228 2024-12-11T06:15:35,073 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:35,073 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=228, ppid=227, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:35,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742338_1514 (size=107) 2024-12-11T06:15:35,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742338_1514 (size=107) 2024-12-11T06:15:35,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 72540bc724c1dade0fb1e882306b217f in 238 msec 2024-12-11T06:15:35,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742338_1514 (size=107) 2024-12-11T06:15:35,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 
2024-12-11T06:15:35,075 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=229}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=229 2024-12-11T06:15:35,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=229 2024-12-11T06:15:35,076 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:35,076 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=229, ppid=227, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:35,078 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=227 2024-12-11T06:15:35,078 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=227, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d in 241 msec 2024-12-11T06:15:35,078 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=227, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:15:35,079 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=227, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:15:35,079 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:15:35,079 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:15:35,079 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:15:35,080 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f] hfiles 2024-12-11T06:15:35,080 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:15:35,080 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f 2024-12-11T06:15:35,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742339_1515 (size=291) 2024-12-11T06:15:35,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742339_1515 (size=291) 2024-12-11T06:15:35,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742339_1515 (size=291) 2024-12-11T06:15:35,089 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=227, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:15:35,089 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-11T06:15:35,089 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T06:15:35,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742340_1516 (size=951) 2024-12-11T06:15:35,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742340_1516 (size=951) 2024-12-11T06:15:35,100 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742340_1516 (size=951) 2024-12-11T06:15:35,107 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=227, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:15:35,112 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=227, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:15:35,112 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-11T06:15:35,114 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=227, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:15:35,114 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 227 2024-12-11T06:15:35,115 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=227, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 288 msec 2024-12-11T06:15:35,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-12-11T06:15:35,147 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T06:15:35,147 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147 2024-12-11T06:15:35,147 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:15:35,176 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 
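The SNAPSHOT operation reported as completed above can be taken through the standard Admin API. A minimal sketch, assuming an ordinary client connection rather than the test's mini-cluster utilities; the snapshot and table names are taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Flush-type snapshot of the test table, mirroring the
      // "ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH"
      // descriptor reported by the SnapshotProcedure entries above.
      admin.snapshot("snaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"));
    }
  }
}

admin.snapshot blocks until the master-side SnapshotProcedure (pid=227 above) finishes, which is roughly what the "Checking to see if procedure is done pid=227" polling in the log reflects.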
2024-12-11T06:15:35,176 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@6b7ca24a, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T06:15:35,178 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T06:15:35,182 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-11T06:15:35,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:15:35,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:15:35,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:15:36,234 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-17167356903148181571.jar 2024-12-11T06:15:36,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:15:36,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:15:36,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-3689805814833540038.jar 2024-12-11T06:15:36,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:15:36,309 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:15:36,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:15:36,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:15:36,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:15:36,310 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:15:36,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T06:15:36,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T06:15:36,311 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T06:15:36,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T06:15:36,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T06:15:36,312 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T06:15:36,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T06:15:36,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T06:15:36,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T06:15:36,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T06:15:36,313 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T06:15:36,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:15:36,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:15:36,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:15:36,314 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:15:36,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:15:36,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:15:36,315 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:15:36,366 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742341_1517 (size=131440) 2024-12-11T06:15:36,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742341_1517 (size=131440) 2024-12-11T06:15:36,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742341_1517 (size=131440) 2024-12-11T06:15:36,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742342_1518 (size=4188619) 2024-12-11T06:15:36,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742342_1518 (size=4188619) 2024-12-11T06:15:36,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742342_1518 (size=4188619) 2024-12-11T06:15:36,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742343_1519 (size=1323991) 2024-12-11T06:15:36,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742343_1519 (size=1323991) 2024-12-11T06:15:36,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742343_1519 (size=1323991) 2024-12-11T06:15:36,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742344_1520 (size=443172) 2024-12-11T06:15:36,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742344_1520 (size=443172) 2024-12-11T06:15:36,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742344_1520 (size=443172) 2024-12-11T06:15:36,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742345_1521 (size=903542) 2024-12-11T06:15:36,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742345_1521 (size=903542) 2024-12-11T06:15:36,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742345_1521 (size=903542) 2024-12-11T06:15:36,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742346_1522 (size=8360360) 2024-12-11T06:15:36,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742346_1522 (size=8360360) 2024-12-11T06:15:36,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742346_1522 (size=8360360) 2024-12-11T06:15:36,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742347_1523 (size=6426220) 2024-12-11T06:15:36,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742347_1523 (size=6426220) 2024-12-11T06:15:36,463 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742347_1523 (size=6426220) 2024-12-11T06:15:36,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742348_1524 (size=1877034) 2024-12-11T06:15:36,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742348_1524 (size=1877034) 2024-12-11T06:15:36,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742348_1524 (size=1877034) 2024-12-11T06:15:36,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742349_1525 (size=77835) 2024-12-11T06:15:36,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742349_1525 (size=77835) 2024-12-11T06:15:36,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742349_1525 (size=77835) 2024-12-11T06:15:36,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742350_1526 (size=30949) 2024-12-11T06:15:36,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742350_1526 (size=30949) 2024-12-11T06:15:36,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742350_1526 (size=30949) 2024-12-11T06:15:36,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742351_1527 (size=1597213) 2024-12-11T06:15:36,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742351_1527 (size=1597213) 2024-12-11T06:15:36,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742351_1527 (size=1597213) 2024-12-11T06:15:36,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742352_1528 (size=4695811) 2024-12-11T06:15:36,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742352_1528 (size=4695811) 2024-12-11T06:15:36,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742352_1528 (size=4695811) 2024-12-11T06:15:36,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742353_1529 (size=232957) 2024-12-11T06:15:36,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742353_1529 (size=232957) 2024-12-11T06:15:36,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742353_1529 (size=232957) 2024-12-11T06:15:36,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742354_1530 (size=127628) 2024-12-11T06:15:36,552 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742354_1530 (size=127628) 2024-12-11T06:15:36,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742354_1530 (size=127628) 2024-12-11T06:15:36,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742355_1531 (size=20406) 2024-12-11T06:15:36,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742355_1531 (size=20406) 2024-12-11T06:15:36,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742355_1531 (size=20406) 2024-12-11T06:15:36,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742356_1532 (size=5175431) 2024-12-11T06:15:36,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742356_1532 (size=5175431) 2024-12-11T06:15:36,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742356_1532 (size=5175431) 2024-12-11T06:15:36,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742357_1533 (size=217634) 2024-12-11T06:15:36,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742357_1533 (size=217634) 2024-12-11T06:15:36,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742357_1533 (size=217634) 2024-12-11T06:15:36,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742358_1534 (size=1832290) 2024-12-11T06:15:36,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742358_1534 (size=1832290) 2024-12-11T06:15:36,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742358_1534 (size=1832290) 2024-12-11T06:15:36,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742359_1535 (size=322274) 2024-12-11T06:15:36,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742359_1535 (size=322274) 2024-12-11T06:15:36,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742359_1535 (size=322274) 2024-12-11T06:15:36,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742360_1536 (size=503880) 2024-12-11T06:15:36,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742360_1536 (size=503880) 2024-12-11T06:15:36,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742360_1536 (size=503880) 
2024-12-11T06:15:36,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742361_1537 (size=29229) 2024-12-11T06:15:36,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742361_1537 (size=29229) 2024-12-11T06:15:36,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742361_1537 (size=29229) 2024-12-11T06:15:36,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742362_1538 (size=24096) 2024-12-11T06:15:36,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742362_1538 (size=24096) 2024-12-11T06:15:36,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742362_1538 (size=24096) 2024-12-11T06:15:36,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742363_1539 (size=111872) 2024-12-11T06:15:36,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742363_1539 (size=111872) 2024-12-11T06:15:36,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742363_1539 (size=111872) 2024-12-11T06:15:36,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742364_1540 (size=45609) 2024-12-11T06:15:36,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742364_1540 (size=45609) 2024-12-11T06:15:36,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742364_1540 (size=45609) 2024-12-11T06:15:36,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742365_1541 (size=136454) 2024-12-11T06:15:36,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742365_1541 (size=136454) 2024-12-11T06:15:36,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742365_1541 (size=136454) 2024-12-11T06:15:36,702 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
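The JobResourceUploader warning just above ("No job jar file set. User classes may not be found.") and the long run of "For class X, using jar Y" entries correspond to the kind of job setup sketched below, assuming plain Job/TableMapReduceUtil usage; the class and job names are illustrative, not the test's code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch");
    // Point the job at a jar containing the user classes; leaving this unset
    // is what triggers the "No job jar file set" warning above.
    job.setJarByClass(JobJarSetupSketch.class);
    // job.setJar("/path/to/user-classes.jar");  // the alternative named in the warning
    // Ship the HBase, ZooKeeper, and thirdparty jars with the job; these are
    // the jars TableMapReduceUtil is shown resolving one by one in the log.
    TableMapReduceUtil.addDependencyJars(job);
  }
}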
2024-12-11T06:15:36,704 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-11T06:15:36,706 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.3 K 2024-12-11T06:15:36,706 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-12-11T06:15:36,706 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.0 K 2024-12-11T06:15:36,706 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-11T06:15:36,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742366_1542 (size=1023) 2024-12-11T06:15:36,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742366_1542 (size=1023) 2024-12-11T06:15:36,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742366_1542 (size=1023) 2024-12-11T06:15:36,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742367_1543 (size=35) 2024-12-11T06:15:36,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742367_1543 (size=35) 2024-12-11T06:15:36,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742367_1543 (size=35) 2024-12-11T06:15:36,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742368_1544 (size=304042) 2024-12-11T06:15:36,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742368_1544 (size=304042) 2024-12-11T06:15:36,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742368_1544 (size=304042) 2024-12-11T06:15:37,390 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:15:37,390 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T06:15:37,390 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0008_000001 (auth:SIMPLE) from 127.0.0.1:56718 2024-12-11T06:15:37,410 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0008/container_1733897436125_0008_01_000001/launch_container.sh] 2024-12-11T06:15:37,410 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0008/container_1733897436125_0008_01_000001/container_tokens] 2024-12-11T06:15:37,410 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0008/container_1733897436125_0008_01_000001/sysfs] 2024-12-11T06:15:38,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-11T06:15:38,055 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-11T06:15:38,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-11T06:15:38,282 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:44460 2024-12-11T06:15:38,641 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:15:43,558 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:15:43,898 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:56712 2024-12-11T06:15:44,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742369_1545 (size=349740) 2024-12-11T06:15:44,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742369_1545 (size=349740) 2024-12-11T06:15:44,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742369_1545 (size=349740) 2024-12-11T06:15:46,164 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth 
successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:53238 2024-12-11T06:15:46,172 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:53536 2024-12-11T06:15:47,001 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:53254 2024-12-11T06:15:47,002 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:53540 2024-12-11T06:15:49,393 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0009_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T06:15:52,782 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000002/launch_container.sh] 2024-12-11T06:15:52,787 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000002/container_tokens] 2024-12-11T06:15:52,787 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T06:15:53,935 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 84d6f5deb75d828a55d48a410e730336, had cached 0 bytes from a total of 6086 2024-12-11T06:15:54,034 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:57076 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T06:15:54,951 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000005/launch_container.sh] 2024-12-11T06:15:54,951 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000005/container_tokens] 2024-12-11T06:15:54,951 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000005/sysfs] 2024-12-11T06:15:55,031 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:57088 2024-12-11T06:15:55,034 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:57096 2024-12-11T06:15:56,036 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:33178 2024-12-11T06:15:56,297 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
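The repeated "Checksum mismatch ... Input and output filesystems are of different types" errors above and below come from the export mappers copying from HDFS to a local filesystem, where per-block checksums are not directly comparable; the error text itself names the two remedies. A minimal sketch of driving ExportSnapshot with the file-level checksum option, assuming ExportSnapshot's default constructor and standard ToolRunner wiring; the destination path is illustrative, and -no-checksum-verify is shown only as the alternative the message mentions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotChecksumSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // File-level checksum validation across different filesystem types, as the
    // error message suggests (equivalent to passing
    // -Ddfs.checksum.combine.mode=COMPOSITE_CRC on the command line).
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/snapshot-export"   // illustrative destination
        // "-no-checksum-verify"  // alternative: skip checksum checks entirely
    });
    System.exit(rc);
  }
}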
2024-12-11T06:15:57,936 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0009_01_000011 while processing FINISH_CONTAINERS event 2024-12-11T06:15:58,688 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0009_01_000012 while processing FINISH_CONTAINERS event 2024-12-11T06:15:58,827 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000004/launch_container.sh] 2024-12-11T06:15:58,827 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000004/container_tokens] 2024-12-11T06:15:58,827 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000004/sysfs] 2024-12-11T06:15:59,804 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000003/launch_container.sh] 2024-12-11T06:15:59,804 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000003/container_tokens] 2024-12-11T06:15:59,804 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58. Input and output filesystems are of different types. 
Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T06:16:03,070 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:42632 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T06:16:04,867 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000010/launch_container.sh] 2024-12-11T06:16:04,867 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000010/container_tokens] 2024-12-11T06:16:04,867 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000010/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T06:16:05,079 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:36834 2024-12-11T06:16:06,088 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:36842 2024-12-11T06:16:06,088 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:42646 2024-12-11T06:16:07,164 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000007/launch_container.sh] 2024-12-11T06:16:07,164 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000007/container_tokens] 2024-12-11T06:16:07,164 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000007/sysfs] 2024-12-11T06:16:08,188 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0009_01_000017 while processing FINISH_CONTAINERS event 2024-12-11T06:16:08,846 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000008/launch_container.sh] 2024-12-11T06:16:08,849 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000008/container_tokens] 2024-12-11T06:16:08,849 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000008/sysfs] 2024-12-11T06:16:09,340 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000009/launch_container.sh] 2024-12-11T06:16:09,340 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000009/container_tokens] 2024-12-11T06:16:09,340 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000009/sysfs] 2024-12-11T06:16:10,629 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000013/launch_container.sh] 2024-12-11T06:16:10,629 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000013/container_tokens] 2024-12-11T06:16:10,630 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000013/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T06:16:12,122 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:36850 2024-12-11T06:16:13,898 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000014/launch_container.sh] 2024-12-11T06:16:13,898 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000014/container_tokens] 2024-12-11T06:16:13,898 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000014/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T06:16:14,821 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000016/launch_container.sh] 2024-12-11T06:16:14,822 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000016/container_tokens] 2024-12-11T06:16:14,822 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000016/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/local-export-1733897735147/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-11T06:16:15,130 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:37520 2024-12-11T06:16:16,142 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:37526 2024-12-11T06:16:16,143 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:46984 2024-12-11T06:16:17,576 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0c6631449afb0062d10d83de4eb96abd, had cached 0 bytes from a total of 5735 2024-12-11T06:16:18,140 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 122c4bf679e9253b1a206af0572eb5cb, had cached 0 bytes from a total of 14463 2024-12-11T06:16:18,375 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0009_01_000022 while processing FINISH_CONTAINERS event 2024-12-11T06:16:18,756 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0009_01_000023 while processing FINISH_CONTAINERS event 2024-12-11T06:16:19,153 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 2a7c8dd9fcbbca25b42780a0d7db509d, had cached 0 bytes from a total of 14651 2024-12-11T06:16:19,153 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 72540bc724c1dade0fb1e882306b217f, had cached 0 bytes from a total of 6106 2024-12-11T06:16:19,510 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000015/launch_container.sh] 2024-12-11T06:16:19,510 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000015/container_tokens] 2024-12-11T06:16:19,512 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000015/sysfs] 2024-12-11T06:16:19,792 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:46994 2024-12-11T06:16:19,792 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:37532 2024-12-11T06:16:19,794 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:37538 2024-12-11T06:16:19,908 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733897436125_0009_01_000021 is : 143 2024-12-11T06:16:19,908 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733897436125_0009_01_000019 is : 143 2024-12-11T06:16:19,925 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000019/launch_container.sh] 2024-12-11T06:16:19,925 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000019/container_tokens] 2024-12-11T06:16:19,925 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000019/sysfs] 2024-12-11T06:16:19,943 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000021/launch_container.sh] 2024-12-11T06:16:19,943 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000021/container_tokens] 2024-12-11T06:16:19,943 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000021/sysfs] 2024-12-11T06:16:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742370_1546 (size=48783) 2024-12-11T06:16:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742370_1546 (size=48783) 2024-12-11T06:16:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742370_1546 (size=48783) 2024-12-11T06:16:19,984 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733897436125_0009_01_000020 is : 143 2024-12-11T06:16:19,995 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000020/launch_container.sh] 2024-12-11T06:16:19,995 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000020/container_tokens] 2024-12-11T06:16:19,995 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000020/sysfs] 2024-12-11T06:16:19,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742371_1547 (size=461) 2024-12-11T06:16:20,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742371_1547 (size=461) 2024-12-11T06:16:20,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742371_1547 (size=461) 2024-12-11T06:16:20,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742372_1548 (size=48783) 2024-12-11T06:16:20,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742372_1548 (size=48783) 
2024-12-11T06:16:20,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742372_1548 (size=48783)
2024-12-11T06:16:20,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742373_1549 (size=349740)
2024-12-11T06:16:20,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742373_1549 (size=349740)
2024-12-11T06:16:20,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742373_1549 (size=349740)
2024-12-11T06:16:20,069 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:37546
2024-12-11T06:16:22,143 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed
org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733897436125_0009_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
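The failure above is the local-export pass of testExportWithChecksum: the snapshot files live on HDFS (hdfs://localhost:42173/...) while the export target is a local file: path, so the two filesystems expose incompatible block-level checksums and the copied hfiles fail verification. The error text itself names the two ways around this: compare file-level CRCs by setting dfs.checksum.combine.mode=COMPOSITE_CRC, or disable verification with -no-checksum-verify. Below is a minimal sketch of driving ExportSnapshot through ToolRunner (the same call path visible in the stack trace above) with the COMPOSITE_CRC setting; the snapshot name is taken from this log, while the target URI is illustrative and the exact option spellings should be checked against the HBase release in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotCompositeCrcSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // File-level CRC comparison keeps source and target checksums comparable
        // even when block sizes or filesystem types differ (e.g. HDFS vs. local file:).
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb0-testExportWithChecksum",  // snapshot name from this log
            "--copy-to", "file:///tmp/local-export"          // illustrative target URI
            // "--no-checksum-verify"                        // last resort: skips checksum verification entirely
        });
        System.exit(rc);
      }
    }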
2024-12-11T06:16:22,145 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897782145
2024-12-11T06:16:22,145 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:42173, tgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897782145, rawTgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897782145, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b
2024-12-11T06:16:22,187 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b
2024-12-11T06:16:22,187 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897782145, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897782145/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum
2024-12-11T06:16:22,209 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity.
2024-12-11T06:16:22,237 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897782145/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum
2024-12-11T06:16:22,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742375_1551 (size=951)
2024-12-11T06:16:22,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742375_1551 (size=951)
2024-12-11T06:16:22,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742375_1551 (size=951)
2024-12-11T06:16:22,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742374_1550 (size=156)
2024-12-11T06:16:22,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742374_1550 (size=156)
2024-12-11T06:16:22,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742374_1550 (size=156)
2024-12-11T06:16:22,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar
2024-12-11T06:16:22,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:22,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:23,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-4558755231646410072.jar 2024-12-11T06:16:23,574 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:23,575 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:23,641 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-7517513355085607642.jar 2024-12-11T06:16:23,641 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:23,642 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:23,642 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:23,642 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:23,642 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:23,642 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:23,643 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T06:16:23,643 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T06:16:23,643 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T06:16:23,643 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T06:16:23,643 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T06:16:23,643 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T06:16:23,644 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T06:16:23,644 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T06:16:23,644 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T06:16:23,644 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T06:16:23,644 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T06:16:23,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-11T06:16:23,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:16:23,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:16:23,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:16:23,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:16:23,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:16:23,646 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:16:23,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742376_1552 (size=131440) 2024-12-11T06:16:23,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742376_1552 (size=131440) 2024-12-11T06:16:23,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742376_1552 (size=131440) 2024-12-11T06:16:23,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742377_1553 (size=4188619) 2024-12-11T06:16:23,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742377_1553 (size=4188619) 2024-12-11T06:16:23,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742377_1553 (size=4188619) 2024-12-11T06:16:23,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742378_1554 (size=1323991) 2024-12-11T06:16:23,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742378_1554 (size=1323991) 2024-12-11T06:16:23,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742378_1554 (size=1323991) 2024-12-11T06:16:23,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:36519 is added to blk_1073742379_1555 (size=903542) 2024-12-11T06:16:23,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742379_1555 (size=903542) 2024-12-11T06:16:23,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742379_1555 (size=903542) 2024-12-11T06:16:23,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742380_1556 (size=8360360) 2024-12-11T06:16:23,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742380_1556 (size=8360360) 2024-12-11T06:16:23,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742380_1556 (size=8360360) 2024-12-11T06:16:23,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742381_1557 (size=1877034) 2024-12-11T06:16:23,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742381_1557 (size=1877034) 2024-12-11T06:16:23,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742381_1557 (size=1877034) 2024-12-11T06:16:23,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742382_1558 (size=77835) 2024-12-11T06:16:23,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742382_1558 (size=77835) 2024-12-11T06:16:23,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742382_1558 (size=77835) 2024-12-11T06:16:23,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742383_1559 (size=30949) 2024-12-11T06:16:23,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742383_1559 (size=30949) 2024-12-11T06:16:23,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742383_1559 (size=30949) 2024-12-11T06:16:23,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742384_1560 (size=1597213) 2024-12-11T06:16:23,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742384_1560 (size=1597213) 2024-12-11T06:16:23,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742384_1560 (size=1597213) 2024-12-11T06:16:23,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742385_1561 (size=6426220) 2024-12-11T06:16:23,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742385_1561 (size=6426220) 2024-12-11T06:16:23,823 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742385_1561 (size=6426220) 2024-12-11T06:16:23,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742386_1562 (size=4695811) 2024-12-11T06:16:23,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742386_1562 (size=4695811) 2024-12-11T06:16:23,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742386_1562 (size=4695811) 2024-12-11T06:16:23,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742387_1563 (size=232957) 2024-12-11T06:16:23,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742387_1563 (size=232957) 2024-12-11T06:16:23,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742387_1563 (size=232957) 2024-12-11T06:16:23,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742388_1564 (size=127628) 2024-12-11T06:16:23,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742388_1564 (size=127628) 2024-12-11T06:16:23,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742388_1564 (size=127628) 2024-12-11T06:16:23,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742389_1565 (size=20406) 2024-12-11T06:16:23,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742389_1565 (size=20406) 2024-12-11T06:16:23,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742389_1565 (size=20406) 2024-12-11T06:16:23,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742390_1566 (size=5175431) 2024-12-11T06:16:23,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742390_1566 (size=5175431) 2024-12-11T06:16:23,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742390_1566 (size=5175431) 2024-12-11T06:16:23,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742391_1567 (size=217634) 2024-12-11T06:16:23,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742391_1567 (size=217634) 2024-12-11T06:16:23,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742391_1567 (size=217634) 2024-12-11T06:16:23,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742392_1568 (size=1832290) 2024-12-11T06:16:23,896 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742392_1568 (size=1832290) 2024-12-11T06:16:23,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742392_1568 (size=1832290) 2024-12-11T06:16:23,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742393_1569 (size=322274) 2024-12-11T06:16:23,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742393_1569 (size=322274) 2024-12-11T06:16:23,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742393_1569 (size=322274) 2024-12-11T06:16:23,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742394_1570 (size=503880) 2024-12-11T06:16:23,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742394_1570 (size=503880) 2024-12-11T06:16:23,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742394_1570 (size=503880) 2024-12-11T06:16:23,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742395_1571 (size=29229) 2024-12-11T06:16:23,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742395_1571 (size=29229) 2024-12-11T06:16:23,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742395_1571 (size=29229) 2024-12-11T06:16:23,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742396_1572 (size=24096) 2024-12-11T06:16:23,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742396_1572 (size=24096) 2024-12-11T06:16:23,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742396_1572 (size=24096) 2024-12-11T06:16:23,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742397_1573 (size=111872) 2024-12-11T06:16:23,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742397_1573 (size=111872) 2024-12-11T06:16:23,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742397_1573 (size=111872) 2024-12-11T06:16:23,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742398_1574 (size=45609) 2024-12-11T06:16:23,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742398_1574 (size=45609) 2024-12-11T06:16:23,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742398_1574 (size=45609) 2024-12-11T06:16:23,951 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742399_1575 (size=443172) 2024-12-11T06:16:23,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742399_1575 (size=443172) 2024-12-11T06:16:23,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742399_1575 (size=443172) 2024-12-11T06:16:23,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742400_1576 (size=136454) 2024-12-11T06:16:23,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742400_1576 (size=136454) 2024-12-11T06:16:23,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742400_1576 (size=136454) 2024-12-11T06:16:23,959 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T06:16:23,962 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-11T06:16:23,963 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.3 K 2024-12-11T06:16:23,963 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-12-11T06:16:23,963 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.0 K 2024-12-11T06:16:23,964 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-11T06:16:23,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742401_1577 (size=1023) 2024-12-11T06:16:23,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742401_1577 (size=1023) 2024-12-11T06:16:23,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742401_1577 (size=1023) 2024-12-11T06:16:23,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742402_1578 (size=35) 2024-12-11T06:16:23,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742402_1578 (size=35) 2024-12-11T06:16:23,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742402_1578 (size=35) 2024-12-11T06:16:23,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742403_1579 (size=303992) 2024-12-11T06:16:23,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742403_1579 (size=303992) 2024-12-11T06:16:23,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742403_1579 (size=303992) 2024-12-11T06:16:25,076 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000018/launch_container.sh] 2024-12-11T06:16:25,076 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000018/container_tokens] 2024-12-11T06:16:25,076 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000018/sysfs] 2024-12-11T06:16:26,136 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:16:26,136 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:16:26,138 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0009_000001 (auth:SIMPLE) from 127.0.0.1:52914 2024-12-11T06:16:26,150 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000001/launch_container.sh] 2024-12-11T06:16:26,150 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000001/container_tokens] 2024-12-11T06:16:26,150 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0009/container_1733897436125_0009_01_000001/sysfs] 2024-12-11T06:16:26,297 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
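The FsDatasetAsyncDiskServiceFixer DEBUG line just above comes from a reflection-based test workaround that stops applying once the private field it looks up ("threadGroup") is renamed or removed in newer Hadoop releases (see HBASE-27595). A purely illustrative Java sketch of that failure mode follows; the class and method names are hypothetical and are not the actual HBase or Hadoop internals.

    import java.lang.reflect.Field;

    public class ReflectiveFieldProbe {
        // Reads a private field by name, returning null when the field does not
        // exist in the running library version instead of failing the test.
        static Object readPrivateField(Object target, String fieldName) {
            try {
                Field f = target.getClass().getDeclaredField(fieldName);
                f.setAccessible(true);
                return f.get(target);
            } catch (NoSuchFieldException e) {
                // Mirrors the log line above: the field is gone in this version,
                // so the workaround is simply skipped.
                System.out.println("NoSuchFieldException: " + fieldName + "; skipping workaround");
                return null;
            } catch (IllegalAccessException e) {
                throw new IllegalStateException(e);
            }
        }

        public static void main(String[] args) {
            // String has no field named "threadGroup", so this prints the skip
            // message and then "null".
            System.out.println(readPrivateField("dummy", "threadGroup"));
        }
    }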
2024-12-11T06:16:26,994 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0010_000001 (auth:SIMPLE) from 127.0.0.1:51114 2024-12-11T06:16:32,226 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0010_000001 (auth:SIMPLE) from 127.0.0.1:47294 2024-12-11T06:16:32,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742404_1580 (size=349690) 2024-12-11T06:16:32,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742404_1580 (size=349690) 2024-12-11T06:16:32,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742404_1580 (size=349690) 2024-12-11T06:16:34,490 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0010_000001 (auth:SIMPLE) from 127.0.0.1:43216 2024-12-11T06:16:34,490 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0010_000001 (auth:SIMPLE) from 127.0.0.1:33610 2024-12-11T06:16:35,344 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0010_000001 (auth:SIMPLE) from 127.0.0.1:33622 2024-12-11T06:16:35,362 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0010_000001 (auth:SIMPLE) from 127.0.0.1:43218 2024-12-11T06:16:36,113 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 72540bc724c1dade0fb1e882306b217f changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:16:36,113 DEBUG [master/b66397bb9319:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 2a7c8dd9fcbbca25b42780a0d7db509d changed from -1.0 to 0.0, refreshing cache 2024-12-11T06:16:38,142 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0010_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T06:16:38,935 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 84d6f5deb75d828a55d48a410e730336, had cached 0 bytes from a total of 6086 2024-12-11T06:16:40,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742405_1581 (size=14651) 2024-12-11T06:16:40,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742405_1581 (size=14651) 2024-12-11T06:16:40,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742405_1581 (size=14651) 2024-12-11T06:16:40,803 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000002/launch_container.sh] 2024-12-11T06:16:40,803 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000002/container_tokens] 2024-12-11T06:16:40,803 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000002/sysfs] 2024-12-11T06:16:43,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742407_1583 (size=8101) 2024-12-11T06:16:43,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742407_1583 (size=8101) 2024-12-11T06:16:43,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742407_1583 (size=8101) 2024-12-11T06:16:43,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742408_1584 (size=5171) 2024-12-11T06:16:43,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742408_1584 (size=5171) 2024-12-11T06:16:43,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742408_1584 (size=5171) 2024-12-11T06:16:43,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742409_1585 (size=6106) 2024-12-11T06:16:43,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742409_1585 (size=6106) 2024-12-11T06:16:43,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742409_1585 (size=6106) 2024-12-11T06:16:43,474 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000003/launch_container.sh] 2024-12-11T06:16:43,474 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000003/container_tokens] 2024-12-11T06:16:43,474 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000003/sysfs] 
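The repeated "delete returned false for path" warnings above are typically benign cleanup noise: the NodeManager issues a recursive delete of a finished container's working directory and only logs when the call reports that nothing was removed. A small illustrative sketch of that pattern against the Hadoop FileSystem API; the path below is a placeholder, not one of the container directories in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BestEffortDelete {
        public static void main(String[] args) throws Exception {
            FileSystem fs = FileSystem.getLocal(new Configuration());
            Path dir = new Path("/tmp/example-container-dir"); // placeholder path
            // delete() returns false when nothing was deleted (for example, the
            // path is already gone) or the removal did not succeed; log a warning
            // instead of failing the cleanup, as the NodeManager does above.
            if (!fs.delete(dir, true)) {
                System.out.println("WARN delete returned false for path: [" + dir + "]");
            }
        }
    }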
2024-12-11T06:16:43,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742406_1582 (size=31734) 2024-12-11T06:16:43,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742406_1582 (size=31734) 2024-12-11T06:16:43,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742406_1582 (size=31734) 2024-12-11T06:16:43,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742410_1586 (size=463) 2024-12-11T06:16:43,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742410_1586 (size=463) 2024-12-11T06:16:43,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742410_1586 (size=463) 2024-12-11T06:16:43,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742411_1587 (size=31734) 2024-12-11T06:16:43,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742411_1587 (size=31734) 2024-12-11T06:16:43,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742411_1587 (size=31734) 2024-12-11T06:16:43,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742412_1588 (size=349690) 2024-12-11T06:16:43,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742412_1588 (size=349690) 2024-12-11T06:16:43,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742412_1588 (size=349690) 2024-12-11T06:16:43,662 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0010_000001 (auth:SIMPLE) from 127.0.0.1:55620 2024-12-11T06:16:43,685 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0010_000001 (auth:SIMPLE) from 127.0.0.1:38396 2024-12-11T06:16:43,693 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0010_000001 (auth:SIMPLE) from 127.0.0.1:55626 2024-12-11T06:16:45,191 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T06:16:45,192 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
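The export recorded above (splits computed at 06:16:23, MapReduce containers running through 06:16:43, finalize and verification at 06:16:45) is the standard ExportSnapshot job. A minimal sketch of how such an export is typically driven, assuming the usual ToolRunner entry point; the destination URI is a placeholder rather than the test's export directory.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Copies the named snapshot's manifest and referenced HFiles to the
            // target filesystem, optionally verifying file checksums.
            int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
                "-snapshot", "snaptb0-testExportWithChecksum",
                "-copy-to", "hdfs://localhost:42173/user/jenkins/export-target" // placeholder
            });
            System.exit(rc);
        }
    }

The checksum verification during the copy is presumably the behaviour exercised by testExportWithChecksum, whose finalize and verify steps appear in the entries above.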
2024-12-11T06:16:45,213 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-11T06:16:45,213 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T06:16:45,214 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T06:16:45,214 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-11T06:16:45,214 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-11T06:16:45,214 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-11T06:16:45,214 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897782145/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897782145/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-11T06:16:45,215 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897782145/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-11T06:16:45,215 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897782145/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-11T06:16:45,222 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-11T06:16:45,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=230, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-11T06:16:45,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-11T06:16:45,225 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897805225"}]},"ts":"1733897805225"} 2024-12-11T06:16:45,227 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-11T06:16:45,227 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-11T06:16:45,228 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=231, ppid=230, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-11T06:16:45,230 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=72540bc724c1dade0fb1e882306b217f, UNASSIGN}, {pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a7c8dd9fcbbca25b42780a0d7db509d, UNASSIGN}] 2024-12-11T06:16:45,231 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a7c8dd9fcbbca25b42780a0d7db509d, UNASSIGN 2024-12-11T06:16:45,231 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=72540bc724c1dade0fb1e882306b217f, UNASSIGN 2024-12-11T06:16:45,231 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=2a7c8dd9fcbbca25b42780a0d7db509d, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:16:45,231 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=72540bc724c1dade0fb1e882306b217f, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:16:45,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=72540bc724c1dade0fb1e882306b217f, UNASSIGN because future has completed 2024-12-11T06:16:45,260 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:16:45,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=234, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure 72540bc724c1dade0fb1e882306b217f, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:16:45,261 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a7c8dd9fcbbca25b42780a0d7db509d, UNASSIGN because future has completed 2024-12-11T06:16:45,268 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:16:45,268 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=233, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:16:45,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-11T06:16:45,414 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] 
handler.UnassignRegionHandler(122): Close 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:16:45,414 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:16:45,415 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1722): Closing 72540bc724c1dade0fb1e882306b217f, disabling compactions & flushes 2024-12-11T06:16:45,415 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:16:45,415 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:16:45,415 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. after waiting 0 ms 2024-12-11T06:16:45,415 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 2024-12-11T06:16:45,421 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:16:45,421 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:16:45,421 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f. 
2024-12-11T06:16:45,421 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(122): Close 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:16:45,422 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] regionserver.HRegion(1676): Region close journal for 72540bc724c1dade0fb1e882306b217f: Waiting for close lock at 1733897805415Running coprocessor pre-close hooks at 1733897805415Disabling compacts and flushes for region at 1733897805415Disabling writes for close at 1733897805415Writing region close event to WAL at 1733897805416 (+1 ms)Running coprocessor post-close hooks at 1733897805421 (+5 ms)Closed at 1733897805421 2024-12-11T06:16:45,422 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:16:45,422 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1722): Closing 2a7c8dd9fcbbca25b42780a0d7db509d, disabling compactions & flushes 2024-12-11T06:16:45,422 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 2024-12-11T06:16:45,422 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 2024-12-11T06:16:45,422 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. after waiting 0 ms 2024-12-11T06:16:45,422 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 
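The "Region close journal" entries above and below are step-by-step timing traces: each close step is stamped with an absolute millisecond timestamp, the steps are concatenated without separators, and the "(+N ms)" suffixes are the gaps since the previous step (here 1733897805415 to 1733897805421, 6 ms in total). A purely illustrative sketch of how such a journal and its deltas can be produced; this is not the HRegion implementation.

    import java.util.ArrayList;
    import java.util.List;

    public class StepJournal {
        private record Step(String name, long ts) {}
        private final List<Step> steps = new ArrayList<>();

        void record(String name) {
            steps.add(new Step(name, System.currentTimeMillis()));
        }

        // Formats the journal like the close journal in the log: "<step> at <ts>",
        // appending "(+N ms)" whenever time advanced since the previous step.
        String format() {
            StringBuilder sb = new StringBuilder();
            long prev = -1;
            for (Step s : steps) {
                sb.append(s.name()).append(" at ").append(s.ts());
                if (prev >= 0 && s.ts() > prev) {
                    sb.append(" (+").append(s.ts() - prev).append(" ms)");
                }
                prev = s.ts();
            }
            return sb.toString();
        }

        public static void main(String[] args) throws InterruptedException {
            StepJournal j = new StepJournal();
            j.record("Waiting for close lock");
            j.record("Disabling writes for close");
            Thread.sleep(5);
            j.record("Closed");
            System.out.println(j.format());
        }
    }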
2024-12-11T06:16:45,430 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=72540bc724c1dade0fb1e882306b217f, regionState=CLOSED 2024-12-11T06:16:45,430 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=234}] handler.UnassignRegionHandler(157): Closed 72540bc724c1dade0fb1e882306b217f 2024-12-11T06:16:45,433 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=234, ppid=232, state=RUNNABLE, hasLock=false; CloseRegionProcedure 72540bc724c1dade0fb1e882306b217f, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:16:45,437 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=234, resume processing ppid=232 2024-12-11T06:16:45,437 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, ppid=232, state=SUCCESS, hasLock=false; CloseRegionProcedure 72540bc724c1dade0fb1e882306b217f, server=b66397bb9319,35939,1733897428441 in 174 msec 2024-12-11T06:16:45,438 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=72540bc724c1dade0fb1e882306b217f, UNASSIGN in 207 msec 2024-12-11T06:16:45,440 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:16:45,441 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:16:45,441 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d. 
2024-12-11T06:16:45,441 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] regionserver.HRegion(1676): Region close journal for 2a7c8dd9fcbbca25b42780a0d7db509d: Waiting for close lock at 1733897805422Running coprocessor pre-close hooks at 1733897805422Disabling compacts and flushes for region at 1733897805422Disabling writes for close at 1733897805422Writing region close event to WAL at 1733897805429 (+7 ms)Running coprocessor post-close hooks at 1733897805441 (+12 ms)Closed at 1733897805441 2024-12-11T06:16:45,442 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=235}] handler.UnassignRegionHandler(157): Closed 2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:16:45,443 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=2a7c8dd9fcbbca25b42780a0d7db509d, regionState=CLOSED 2024-12-11T06:16:45,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=233, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:16:45,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=233 2024-12-11T06:16:45,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=233, state=SUCCESS, hasLock=false; CloseRegionProcedure 2a7c8dd9fcbbca25b42780a0d7db509d, server=b66397bb9319,42579,1733897428663 in 177 msec 2024-12-11T06:16:45,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=233, resume processing ppid=231 2024-12-11T06:16:45,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=2a7c8dd9fcbbca25b42780a0d7db509d, UNASSIGN in 218 msec 2024-12-11T06:16:45,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=230 2024-12-11T06:16:45,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=230, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 224 msec 2024-12-11T06:16:45,462 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897805462"}]},"ts":"1733897805462"} 2024-12-11T06:16:45,463 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-11T06:16:45,463 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-11T06:16:45,466 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 242 msec 2024-12-11T06:16:45,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-11T06:16:45,537 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T06:16:45,537 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-11T06:16:45,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T06:16:45,539 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=236, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T06:16:45,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-11T06:16:45,540 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=236, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T06:16:45,543 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44539 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-11T06:16:45,544 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f 2024-12-11T06:16:45,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T06:16:45,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T06:16:45,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T06:16:45,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T06:16:45,547 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-11T06:16:45,547 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-11T06:16:45,547 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-11T06:16:45,547 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-11T06:16:45,547 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/recovered.edits] 2024-12-11T06:16:45,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T06:16:45,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:16:45,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T06:16:45,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T06:16:45,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:16:45,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-11T06:16:45,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:16:45,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:16:45,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-11T06:16:45,551 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:45,551 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:45,551 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:45,552 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:45,558 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:16:45,560 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/cf/5bb030151b8e4719bebb2ad86942eeb3 2024-12-11T06:16:45,560 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/recovered.edits] 2024-12-11T06:16:45,564 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f/recovered.edits/9.seqid 2024-12-11T06:16:45,565 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/72540bc724c1dade0fb1e882306b217f 2024-12-11T06:16:45,565 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/cf/d95b4d3246a64301bc896c806ed04b58 2024-12-11T06:16:45,569 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d/recovered.edits/9.seqid 2024-12-11T06:16:45,572 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportWithChecksum/2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:16:45,572 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived 
testtb-testExportWithChecksum regions 2024-12-11T06:16:45,572 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-11T06:16:45,576 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-12-11T06:16:45,585 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121121a8842d6be6442eb526f8b16a1f4460_2a7c8dd9fcbbca25b42780a0d7db509d 2024-12-11T06:16:45,585 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e2024121128a7c705efd441b2ad0ed2b2afba6f74_72540bc724c1dade0fb1e882306b217f 2024-12-11T06:16:45,586 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-11T06:16:45,588 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=236, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T06:16:45,592 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-11T06:16:45,595 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-11T06:16:45,596 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=236, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T06:16:45,596 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithChecksum' from region states. 
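The cleanup running here (disable at 06:16:45,222, delete table with HFile archiving, then the two snapshot deletions that follow) corresponds to what a client issues through the public Admin API. A minimal sketch under that assumption; connection configuration is omitted, and the table and snapshot names are taken from the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotTestCleanup {
        public static void main(String[] args) throws Exception {
            TableName table = TableName.valueOf("testtb-testExportWithChecksum");
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.disableTable(table);   // DisableTableProcedure (pid=230 above)
                admin.deleteTable(table);    // DeleteTableProcedure (pid=236), archives HFiles first
                admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
                admin.deleteSnapshot("snaptb0-testExportWithChecksum");
            }
        }
    }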
2024-12-11T06:16:45,596 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897805596"}]},"ts":"9223372036854775807"} 2024-12-11T06:16:45,597 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897805596"}]},"ts":"9223372036854775807"} 2024-12-11T06:16:45,599 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T06:16:45,599 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 72540bc724c1dade0fb1e882306b217f, NAME => 'testtb-testExportWithChecksum,,1733897733806.72540bc724c1dade0fb1e882306b217f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 2a7c8dd9fcbbca25b42780a0d7db509d, NAME => 'testtb-testExportWithChecksum,1,1733897733806.2a7c8dd9fcbbca25b42780a0d7db509d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T06:16:45,599 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-11T06:16:45,600 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897805599"}]},"ts":"9223372036854775807"} 2024-12-11T06:16:45,602 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-11T06:16:45,602 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=236, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-11T06:16:45,604 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 65 msec 2024-12-11T06:16:45,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-11T06:16:45,657 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-11T06:16:45,657 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-11T06:16:45,665 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-11T06:16:45,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-11T06:16:45,669 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-12-11T06:16:45,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-11T06:16:45,699 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=819 (was 
817) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2036744832_1 at /127.0.0.1:57008 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:44003 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:33864 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:60190 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-8972 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) 
app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:45765 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 30198) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #5 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.getContainerPid(ContainerLaunch.java:1062) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerCleanup.run(ContainerCleanup.java:119) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.getContainerPid(ContainerLaunch.java:1062) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerCleanup.run(ContainerCleanup.java:119) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:44003 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:57026 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2036744832_1 at /127.0.0.1:60164 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45765 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 801) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=971 (was 735) - SystemLoadAverage LEAK? -, ProcessCount=24 (was 17) - ProcessCount LEAK? 
-, AvailableMemoryMB=921 (was 1303) 2024-12-11T06:16:45,699 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500 2024-12-11T06:16:45,723 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=819, OpenFileDescriptor=805, MaxFileDescriptor=1048576, SystemLoadAverage=971, ProcessCount=24, AvailableMemoryMB=920 2024-12-11T06:16:45,723 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500 2024-12-11T06:16:45,725 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T06:16:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:45,727 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T06:16:45,727 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 237 2024-12-11T06:16:45,728 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T06:16:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-11T06:16:45,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742413_1589 (size=454) 2024-12-11T06:16:45,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742413_1589 (size=454) 2024-12-11T06:16:45,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742413_1589 (size=454) 2024-12-11T06:16:45,737 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 2c757a3ba83f6787a52a17e2d540ad02, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:16:45,737 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 31fa7d87c48ac12cb7296ad21d89622a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:16:45,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742414_1590 (size=79) 2024-12-11T06:16:45,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742414_1590 (size=79) 2024-12-11T06:16:45,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742414_1590 (size=79) 2024-12-11T06:16:45,746 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:16:45,746 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 2c757a3ba83f6787a52a17e2d540ad02, disabling compactions & flushes 2024-12-11T06:16:45,746 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:16:45,746 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:16:45,746 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. after waiting 0 ms 2024-12-11T06:16:45,746 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 
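[Editor's illustrative sketch, not part of the log] The table descriptor printed in the create request above (family 'cf' with VERSIONS => '1', IS_MOB => 'true', MOB_THRESHOLD => '0', BLOOMFILTER => 'ROW', BLOCKSIZE => 64KB, and the two regions split at row key '1') corresponds roughly to what a client would build with the HBase 2.x Admin API. A minimal, hypothetical Java sketch follows; the connection setup and class name are assumptions, not taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      // Column family 'cf' mirroring the attributes printed in the log above.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .setMobEnabled(true)
          .setMobThreshold(0L)              // MOB_THRESHOLD => '0': every cell in 'cf' is a MOB cell
          .setBloomFilterType(BloomType.ROW)
          .setBlocksize(64 * 1024)
          .build();
      TableDescriptor table = TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(cf)
          .build();
      // One split point '1' yields the two regions seen in the log: ['', '1') and ['1', '').
      byte[][] splits = { Bytes.toBytes("1") };
      admin.createTable(table, splits);     // blocks until the CreateTableProcedure completes
    }
  }
}

The repeated "Checking to see if procedure is done pid=237" records above and below are the master answering the client's completion polls for that same create procedure.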
2024-12-11T06:16:45,746 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:16:45,747 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 2c757a3ba83f6787a52a17e2d540ad02: Waiting for close lock at 1733897805746Disabling compacts and flushes for region at 1733897805746Disabling writes for close at 1733897805746Writing region close event to WAL at 1733897805746Closed at 1733897805746 2024-12-11T06:16:45,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742415_1591 (size=79) 2024-12-11T06:16:45,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742415_1591 (size=79) 2024-12-11T06:16:45,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742415_1591 (size=79) 2024-12-11T06:16:45,749 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:16:45,749 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing 31fa7d87c48ac12cb7296ad21d89622a, disabling compactions & flushes 2024-12-11T06:16:45,749 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 2024-12-11T06:16:45,749 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 2024-12-11T06:16:45,749 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. after waiting 0 ms 2024-12-11T06:16:45,749 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 2024-12-11T06:16:45,749 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 
2024-12-11T06:16:45,749 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for 31fa7d87c48ac12cb7296ad21d89622a: Waiting for close lock at 1733897805749Disabling compacts and flushes for region at 1733897805749Disabling writes for close at 1733897805749Writing region close event to WAL at 1733897805749Closed at 1733897805749 2024-12-11T06:16:45,750 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T06:16:45,750 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733897805750"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897805750"}]},"ts":"1733897805750"} 2024-12-11T06:16:45,750 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733897805750"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733897805750"}]},"ts":"1733897805750"} 2024-12-11T06:16:45,754 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-11T06:16:45,755 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T06:16:45,755 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897805755"}]},"ts":"1733897805755"} 2024-12-11T06:16:45,757 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-11T06:16:45,757 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {b66397bb9319=0} racks are {/default-rack=0} 2024-12-11T06:16:45,758 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T06:16:45,758 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T06:16:45,758 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T06:16:45,758 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T06:16:45,758 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T06:16:45,758 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T06:16:45,758 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T06:16:45,759 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T06:16:45,759 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T06:16:45,759 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T06:16:45,759 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, 
ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=31fa7d87c48ac12cb7296ad21d89622a, ASSIGN}, {pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2c757a3ba83f6787a52a17e2d540ad02, ASSIGN}] 2024-12-11T06:16:45,760 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2c757a3ba83f6787a52a17e2d540ad02, ASSIGN 2024-12-11T06:16:45,760 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=31fa7d87c48ac12cb7296ad21d89622a, ASSIGN 2024-12-11T06:16:45,760 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2c757a3ba83f6787a52a17e2d540ad02, ASSIGN; state=OFFLINE, location=b66397bb9319,35939,1733897428441; forceNewPlan=false, retain=false 2024-12-11T06:16:45,760 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=31fa7d87c48ac12cb7296ad21d89622a, ASSIGN; state=OFFLINE, location=b66397bb9319,42579,1733897428663; forceNewPlan=false, retain=false 2024-12-11T06:16:45,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-11T06:16:45,911 INFO [b66397bb9319:39063 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
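[Editor's illustrative sketch, not part of the log] The CREATE_TABLE_ADD_TO_META step above writes one info:regioninfo and info:state cell per region into hbase:meta, which the assignment procedures then flip through OPENING/OPEN. A hedged sketch of how a client could scan those catalog rows with the ordinary Table API; the 'conn' parameter is an assumed, already-open Connection:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

class MetaCatalogSketch {
  /** Dump the hbase:meta rows written for the table created above. */
  static void dumpMetaRows(Connection conn) throws Exception {
    byte[] prefix = Bytes.toBytes("testtb-testExportFileSystemStateWithSkipTmp,");
    Scan scan = new Scan().setRowPrefixFilter(prefix).addFamily(Bytes.toBytes("info"));
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(scan)) {
      for (Result row : scanner) {
        byte[] state = row.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
        System.out.println(Bytes.toString(row.getRow()) + " state=" + Bytes.toString(state));
      }
    }
  }
}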
2024-12-11T06:16:45,911 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=31fa7d87c48ac12cb7296ad21d89622a, regionState=OPENING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:16:45,911 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=2c757a3ba83f6787a52a17e2d540ad02, regionState=OPENING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:16:45,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=31fa7d87c48ac12cb7296ad21d89622a, ASSIGN because future has completed 2024-12-11T06:16:45,916 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a, server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:16:45,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=239, ppid=237, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2c757a3ba83f6787a52a17e2d540ad02, ASSIGN because future has completed 2024-12-11T06:16:45,918 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=241, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:16:46,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-11T06:16:46,071 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 2024-12-11T06:16:46,071 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7752): Opening region: {ENCODED => 31fa7d87c48ac12cb7296ad21d89622a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a.', STARTKEY => '', ENDKEY => '1'} 2024-12-11T06:16:46,072 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. service=AccessControlService 2024-12-11T06:16:46,072 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:16:46,072 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,073 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:16:46,073 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7794): checking encryption for 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,073 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(7797): checking classloading for 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,074 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:16:46,074 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7752): Opening region: {ENCODED => 2c757a3ba83f6787a52a17e2d540ad02, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02.', STARTKEY => '1', ENDKEY => ''} 2024-12-11T06:16:46,074 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. service=AccessControlService 2024-12-11T06:16:46,074 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-11T06:16:46,074 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,074 INFO [StoreOpener-31fa7d87c48ac12cb7296ad21d89622a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,074 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T06:16:46,075 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7794): checking encryption for 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,075 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(7797): checking classloading for 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,076 INFO [StoreOpener-2c757a3ba83f6787a52a17e2d540ad02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,076 INFO [StoreOpener-31fa7d87c48ac12cb7296ad21d89622a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 31fa7d87c48ac12cb7296ad21d89622a columnFamilyName cf 2024-12-11T06:16:46,077 DEBUG [StoreOpener-31fa7d87c48ac12cb7296ad21d89622a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:16:46,078 INFO [StoreOpener-31fa7d87c48ac12cb7296ad21d89622a-1 {}] regionserver.HStore(327): Store=31fa7d87c48ac12cb7296ad21d89622a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:16:46,078 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1038): replaying wal for 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,079 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,079 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,079 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1048): stopping wal replay for 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,079 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1060): Cleaning up temporary data for 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,081 INFO [StoreOpener-2c757a3ba83f6787a52a17e2d540ad02-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c757a3ba83f6787a52a17e2d540ad02 columnFamilyName cf 2024-12-11T06:16:46,081 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1093): writing seq id for 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,081 DEBUG [StoreOpener-2c757a3ba83f6787a52a17e2d540ad02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:16:46,082 INFO [StoreOpener-2c757a3ba83f6787a52a17e2d540ad02-1 {}] regionserver.HStore(327): Store=2c757a3ba83f6787a52a17e2d540ad02/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T06:16:46,082 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1038): replaying wal for 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,083 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,083 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,084 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:16:46,084 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1048): stopping wal replay for 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,084 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1060): Cleaning up temporary data for 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,084 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1114): Opened 31fa7d87c48ac12cb7296ad21d89622a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68987462, jitterRate=0.02799329161643982}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T06:16:46,084 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,085 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegion(1006): Region open journal for 31fa7d87c48ac12cb7296ad21d89622a: Running coprocessor pre-open hook at 1733897806073Writing region info on filesystem at 1733897806073Initializing all the Stores at 1733897806074 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897806074Cleaning up temporary data from old regions at 1733897806079 (+5 ms)Running coprocessor post-open hooks at 1733897806084 (+5 ms)Region opened successfully at 1733897806085 (+1 ms) 2024-12-11T06:16:46,085 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1093): writing seq id for 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,086 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a., pid=240, masterSystemTime=1733897806068 2024-12-11T06:16:46,088 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T06:16:46,089 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1114): Opened 2c757a3ba83f6787a52a17e2d540ad02; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75242682, jitterRate=0.12120333313941956}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 
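[Editor's illustrative sketch, not part of the log] The CompactionConfiguration records above (minCompactSize 128 MB, files [min 3, max 10), ratio 1.2, major period 604800000 ms) are the per-store compaction settings resolved from configuration defaults. If one wanted to override them, the usual knobs are the hbase.hstore.compaction.* keys; the following Java sketch simply restates the logged values against those standard property names (an assumption about which keys produced them, not something the log itself states):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact (exclusive in the log line)
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // exploring-compaction ratio
    conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period: 7 days in ms
  }
}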
2024-12-11T06:16:46,089 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,089 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegion(1006): Region open journal for 2c757a3ba83f6787a52a17e2d540ad02: Running coprocessor pre-open hook at 1733897806075Writing region info on filesystem at 1733897806075Initializing all the Stores at 1733897806075Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733897806075Cleaning up temporary data from old regions at 1733897806084 (+9 ms)Running coprocessor post-open hooks at 1733897806089 (+5 ms)Region opened successfully at 1733897806089 2024-12-11T06:16:46,090 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 2024-12-11T06:16:46,090 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=240}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 2024-12-11T06:16:46,090 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02., pid=241, masterSystemTime=1733897806070 2024-12-11T06:16:46,090 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=238 updating hbase:meta row=31fa7d87c48ac12cb7296ad21d89622a, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:16:46,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=240, ppid=238, state=RUNNABLE, hasLock=false; OpenRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:16:46,093 DEBUG [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:16:46,093 INFO [RS_OPEN_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_OPEN_REGION, pid=241}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 
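[Editor's illustrative sketch, not part of the log] At this point both regions have been opened on their region servers and the master is about to mark them OPEN in hbase:meta (pid=238/239 finish just below, and the table is flipped to ENABLED a few records later). A minimal sketch of how a client could confirm the assignment the master records here, once the create procedure has completed; 'conn' is again an assumed, already-open Connection:

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

class VerifyAssignmentSketch {
  static void verify(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(name)) {
      System.out.println("enabled: " + admin.isTableEnabled(name));
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Expect two entries, split at '1', each hosted by one of the mini-cluster region servers.
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}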
2024-12-11T06:16:46,094 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=239 updating hbase:meta row=2c757a3ba83f6787a52a17e2d540ad02, regionState=OPEN, openSeqNum=2, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:16:46,096 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=241, ppid=239, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:16:46,098 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=238 2024-12-11T06:16:46,098 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=238, state=SUCCESS, hasLock=false; OpenRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a, server=b66397bb9319,42579,1733897428663 in 179 msec 2024-12-11T06:16:46,100 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=31fa7d87c48ac12cb7296ad21d89622a, ASSIGN in 339 msec 2024-12-11T06:16:46,104 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=b66397bb9319,35939,1733897428441, table=testtb-testExportFileSystemStateWithSkipTmp, region=2c757a3ba83f6787a52a17e2d540ad02. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-11T06:16:46,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=241, resume processing ppid=239 2024-12-11T06:16:46,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; OpenRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02, server=b66397bb9319,35939,1733897428441 in 187 msec 2024-12-11T06:16:46,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=239, resume processing ppid=237 2024-12-11T06:16:46,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, ppid=237, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2c757a3ba83f6787a52a17e2d540ad02, ASSIGN in 348 msec 2024-12-11T06:16:46,110 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T06:16:46,111 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897806110"}]},"ts":"1733897806110"} 2024-12-11T06:16:46,113 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-11T06:16:46,113 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=237, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T06:16:46,114 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: 
RWXCA 2024-12-11T06:16:46,117 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44539 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-11T06:16:46,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:16:46,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:16:46,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:16:46,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:16:46,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:46,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:46,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:46,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:46,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:46,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:46,123 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:46,123 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data 
PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-11T06:16:46,125 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 397 msec 2024-12-11T06:16:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=237 2024-12-11T06:16:46,357 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T06:16:46,357 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T06:16:46,360 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:46,360 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 2024-12-11T06:16:46,360 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:16:46,362 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T06:16:46,367 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T06:16:46,372 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T06:16:46,374 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T06:16:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897806374 (current time:1733897806374). 
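The snapshot request logged just above ({ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }) is what the client-side Admin API sends to the master. A minimal sketch, assuming an open Admin handle; the snapshot and table names come from the log, the rest is illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

final class TakeEmptySnapshotSketch {
  // Blocks until the master-side SnapshotProcedure (pid=242 below) completes.
  static void takeSnapshot(Admin admin) throws Exception {
    admin.snapshot(new SnapshotDescription(
        "emptySnaptb0-testExportFileSystemStateWithSkipTmp",
        TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
        SnapshotType.FLUSH)); // type=FLUSH in the request above
  }
}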
2024-12-11T06:16:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:16:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-11T06:16:46,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:16:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3de11d6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:46,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:16:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:16:46,376 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:16:46,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:16:46,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:16:46,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61c0fc01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:46,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:16:46,377 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:16:46,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:46,378 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:16:46,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41cf9dc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:16:46,380 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:16:46,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:16:46,381 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45750, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:16:46,382 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 2024-12-11T06:16:46,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:16:46,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:46,383 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:16:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63878ecb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:16:46,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:16:46,384 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:16:46,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:16:46,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:16:46,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d973cc2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:46,384 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:16:46,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:16:46,385 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:46,386 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35674, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:16:46,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51cc38c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:46,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:16:46,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:16:46,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:16:46,389 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45762, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-11T06:16:46,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,44539,1733897428597, seqNum=30] 2024-12-11T06:16:46,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:16:46,392 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40386, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:16:46,393 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 2024-12-11T06:16:46,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:16:46,393 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:46,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:46,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-11T06:16:46,394 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:16:46,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-11T06:16:46,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T06:16:46,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-11T06:16:46,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-11T06:16:46,396 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:16:46,397 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:16:46,399 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:16:46,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742416_1592 (size=203) 2024-12-11T06:16:46,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742416_1592 (size=203) 2024-12-11T06:16:46,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742416_1592 (size=203) 2024-12-11T06:16:46,410 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:16:46,410 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02}] 2024-12-11T06:16:46,411 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,411 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-11T06:16:46,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-12-11T06:16:46,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-12-11T06:16:46,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:16:46,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 2024-12-11T06:16:46,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for 31fa7d87c48ac12cb7296ad21d89622a: 2024-12-11T06:16:46,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for 2c757a3ba83f6787a52a17e2d540ad02: 2024-12-11T06:16:46,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-11T06:16:46,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-11T06:16:46,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:46,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:46,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:16:46,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:16:46,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:16:46,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-11T06:16:46,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742417_1593 (size=82) 2024-12-11T06:16:46,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742417_1593 (size=82) 2024-12-11T06:16:46,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742417_1593 (size=82) 2024-12-11T06:16:46,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 
2024-12-11T06:16:46,579 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-12-11T06:16:46,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-12-11T06:16:46,580 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,580 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:46,582 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02 in 171 msec 2024-12-11T06:16:46,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742418_1594 (size=82) 2024-12-11T06:16:46,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742418_1594 (size=82) 2024-12-11T06:16:46,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742418_1594 (size=82) 2024-12-11T06:16:46,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 
2024-12-11T06:16:46,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-12-11T06:16:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-12-11T06:16:46,590 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,590 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:46,592 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=243, resume processing ppid=242 2024-12-11T06:16:46,592 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a in 180 msec 2024-12-11T06:16:46,592 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:16:46,593 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:16:46,594 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
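Both SnapshotRegionProcedure children (pid=243 and pid=244) finished with empty hfile lists, and the MOB region is now being added to the manifest; the table holds no data yet, so emptySnaptb0-... captures schema only. Once the parent procedure reaches SNAPSHOT_COMPLETE_SNAPSHOT a few entries below, the snapshot becomes visible to Admin.listSnapshots(); a sketch of such a client-side check (the helper name and filtering are ad hoc, only listSnapshots() is the real API):

import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

final class ListSnapshotsSketch {
  static boolean snapshotExists(Admin admin, String name) throws Exception {
    List<SnapshotDescription> snapshots = admin.listSnapshots();
    // Returns true only after the SnapshotProcedure has moved the snapshot out of .tmp.
    return snapshots.stream().anyMatch(s -> name.equals(s.getName()));
  }
}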
2024-12-11T06:16:46,594 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:16:46,594 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:16:46,595 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-11T06:16:46,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742419_1595 (size=74) 2024-12-11T06:16:46,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742419_1595 (size=74) 2024-12-11T06:16:46,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742419_1595 (size=74) 2024-12-11T06:16:46,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-11T06:16:47,005 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:16:47,005 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,006 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742420_1596 (size=697) 2024-12-11T06:16:47,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742420_1596 (size=697) 2024-12-11T06:16:47,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742420_1596 (size=697) 2024-12-11T06:16:47,020 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:16:47,024 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:16:47,024 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,025 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:16:47,025 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-12-11T06:16:47,027 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 631 msec 2024-12-11T06:16:47,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-11T06:16:47,027 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T06:16:47,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42579 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:16:47,033 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35939 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. with WAL disabled. Data may be lost in the event of a crash. 2024-12-11T06:16:47,034 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T06:16:47,036 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,036 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 
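The two "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are what HRegion emits when a client mutation skips the write-ahead log. A hedged sketch of such a write using Durability.SKIP_WAL; only the table and family names come from the log, the row key, qualifier and value are hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class SkipWalPutSketch {
  static void writeWithoutWal(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Table table = conn.getTable(tn)) {
      Put put = new Put(Bytes.toBytes("row-0"));   // hypothetical row key
      put.setDurability(Durability.SKIP_WAL);      // skips the WAL, which triggers the warning above
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put);                              // unflushed data is lost if the region server crashes
    }
  }
}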
2024-12-11T06:16:47,036 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T06:16:47,038 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T06:16:47,041 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T06:16:47,046 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-11T06:16:47,048 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T06:16:47,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733897807048 (current time:1733897807048). 2024-12-11T06:16:47,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-11T06:16:47,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-11T06:16:47,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-11T06:16:47,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5673d715, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:47,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:16:47,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:16:47,049 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:16:47,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:16:47,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:16:47,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dc20978, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:47,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:16:47,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:16:47,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:47,051 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35690, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:16:47,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79fea8e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:47,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:16:47,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:16:47,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:16:47,053 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45768, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:16:47,054 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
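The second request ({ ss=snaptb0-testExportFileSystemStateWithSkipTmp ... type=FLUSH ttl=0 }) and the recurring "Checking to see if procedure is done pid=..." lines are the master-side view of a client waiting on a snapshot procedure. At the API level the non-blocking variant looks roughly like the sketch below; the names come from the log, and the comments describe the assumed behaviour of the async Admin client, which polls the master until the procedure result is available.

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

final class AsyncSnapshotSketch {
  static void takeSnapshotAsync(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "snaptb0-testExportFileSystemStateWithSkipTmp",
        TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
        SnapshotType.FLUSH);
    // Submits the SnapshotProcedure without blocking.
    Future<Void> done = admin.snapshotAsync(desc);
    // Waiting on the future is what produces the repeated
    // "Checking to see if procedure is done" polling seen in the master log.
    done.get();
  }
}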
2024-12-11T06:16:47,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:16:47,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:47,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:47,054 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:16:47,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a876878, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:47,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ClusterIdFetcher(90): Going to request b66397bb9319,39063,-1 for getting cluster id 2024-12-11T06:16:47,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T06:16:47,055 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5d78b9e9-31a6-4f57-a820-448a70e091f0' 2024-12-11T06:16:47,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T06:16:47,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5d78b9e9-31a6-4f57-a820-448a70e091f0" 2024-12-11T06:16:47,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b48b283, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:47,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [b66397bb9319,39063,-1] 2024-12-11T06:16:47,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T06:16:47,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:47,057 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35700, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T06:16:47,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35c570f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T06:16:47,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T06:16:47,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=b66397bb9319,35939,1733897428441, seqNum=-1] 2024-12-11T06:16:47,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:16:47,059 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45778, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:16:47,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., hostname=b66397bb9319,44539,1733897428597, seqNum=30] 2024-12-11T06:16:47,060 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T06:16:47,061 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40396, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T06:16:47,062 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063. 
2024-12-11T06:16:47,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-11T06:16:47,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:47,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:16:47,062 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T06:16:47,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-11T06:16:47,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
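The "Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA]" lines show the master reading the table's stored permissions so they can be carried into the snapshot description. The client-side way to inspect those same entries is AccessControlClient; a minimal sketch, assuming an open Connection, with only the table name taken from the log.

import java.util.List;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

final class ReadAclSketch {
  static void printTablePermissions(Connection conn) throws Throwable {
    // getUserPermissions takes a table-name regex and returns the stored ACL entries.
    List<UserPermission> perms = AccessControlClient.getUserPermissions(
        conn, "testtb-testExportFileSystemStateWithSkipTmp");
    // Expected to include the owner entry the log reports as "jenkins: RWXCA".
    perms.forEach(p -> System.out.println(p));
  }
}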
2024-12-11T06:16:47,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-11T06:16:47,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-12-11T06:16:47,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-11T06:16:47,065 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-11T06:16:47,066 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-11T06:16:47,068 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-11T06:16:47,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742421_1597 (size=198) 2024-12-11T06:16:47,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742421_1597 (size=198) 2024-12-11T06:16:47,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742421_1597 (size=198) 2024-12-11T06:16:47,074 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-11T06:16:47,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a}, {pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02}] 2024-12-11T06:16:47,075 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:47,075 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:47,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-11T06:16:47,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42579 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=246 2024-12-11T06:16:47,226 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 2024-12-11T06:16:47,226 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2902): Flushing 31fa7d87c48ac12cb7296ad21d89622a 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-11T06:16:47,226 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35939 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=247 2024-12-11T06:16:47,226 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:16:47,227 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2902): Flushing 2c757a3ba83f6787a52a17e2d540ad02 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-11T06:16:47,243 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121197f97180b07c40b0ac32eaf578c031c6_2c757a3ba83f6787a52a17e2d540ad02 is 71, key is 118a62410f332c04c96c856121a64643/cf:q/1733897807033/Put/seqid=0 2024-12-11T06:16:47,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742422_1598 (size=8101) 2024-12-11T06:16:47,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742422_1598 (size=8101) 2024-12-11T06:16:47,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742422_1598 (size=8101) 2024-12-11T06:16:47,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:16:47,254 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121197f97180b07c40b0ac32eaf578c031c6_2c757a3ba83f6787a52a17e2d540ad02 to 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024121197f97180b07c40b0ac32eaf578c031c6_2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:47,255 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/.tmp/cf/f381f24f48914e51a951ba644b3dc05e, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=2c757a3ba83f6787a52a17e2d540ad02] 2024-12-11T06:16:47,255 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/.tmp/cf/f381f24f48914e51a951ba644b3dc05e is 220, key is 160012a3848b820df1a4eccf8b009cab8/cf:q/1733897807033/Put/seqid=0 2024-12-11T06:16:47,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121167150b5267714c5581fb3b692e814b79_31fa7d87c48ac12cb7296ad21d89622a is 71, key is 02054751a6fbca6f8a6f241ba60474dd/cf:q/1733897807032/Put/seqid=0 2024-12-11T06:16:47,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742423_1599 (size=15309) 2024-12-11T06:16:47,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742423_1599 (size=15309) 2024-12-11T06:16:47,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742424_1600 (size=5172) 2024-12-11T06:16:47,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742424_1600 (size=5172) 2024-12-11T06:16:47,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742424_1600 (size=5172) 2024-12-11T06:16:47,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742423_1599 (size=15309) 2024-12-11T06:16:47,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:16:47,266 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/.tmp/cf/f381f24f48914e51a951ba644b3dc05e 
2024-12-11T06:16:47,270 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121167150b5267714c5581fb3b692e814b79_31fa7d87c48ac12cb7296ad21d89622a to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024121167150b5267714c5581fb3b692e814b79_31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:47,270 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/.tmp/cf/f381f24f48914e51a951ba644b3dc05e as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/cf/f381f24f48914e51a951ba644b3dc05e 2024-12-11T06:16:47,271 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/.tmp/cf/099844c333374943b7463c6c0afd7819, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=31fa7d87c48ac12cb7296ad21d89622a] 2024-12-11T06:16:47,271 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/.tmp/cf/099844c333374943b7463c6c0afd7819 is 220, key is 00fea438c5a41fe4ad98243f56ff3d7cb/cf:q/1733897807032/Put/seqid=0 2024-12-11T06:16:47,274 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/cf/f381f24f48914e51a951ba644b3dc05e, entries=46, sequenceid=6, filesize=15.0 K 2024-12-11T06:16:47,275 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 2c757a3ba83f6787a52a17e2d540ad02 in 48ms, sequenceid=6, compaction requested=false 2024-12-11T06:16:47,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-11T06:16:47,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.HRegion(2603): Flush status journal for 2c757a3ba83f6787a52a17e2d540ad02: 2024-12-11T06:16:47,276 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-11T06:16:47,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742425_1601 (size=6178) 2024-12-11T06:16:47,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:16:47,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/cf/f381f24f48914e51a951ba644b3dc05e] hfiles 2024-12-11T06:16:47,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/cf/f381f24f48914e51a951ba644b3dc05e for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742425_1601 (size=6178) 2024-12-11T06:16:47,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742425_1601 (size=6178) 2024-12-11T06:16:47,277 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/.tmp/cf/099844c333374943b7463c6c0afd7819 2024-12-11T06:16:47,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742426_1602 (size=121) 2024-12-11T06:16:47,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742426_1602 (size=121) 2024-12-11T06:16:47,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/.tmp/cf/099844c333374943b7463c6c0afd7819 as 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/cf/099844c333374943b7463c6c0afd7819 2024-12-11T06:16:47,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742426_1602 (size=121) 2024-12-11T06:16:47,282 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:16:47,282 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=247}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=247 2024-12-11T06:16:47,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=247 2024-12-11T06:16:47,282 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:47,282 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:47,284 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02 in 209 msec 2024-12-11T06:16:47,285 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/cf/099844c333374943b7463c6c0afd7819, entries=4, sequenceid=6, filesize=6.0 K 2024-12-11T06:16:47,286 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 31fa7d87c48ac12cb7296ad21d89622a in 60ms, sequenceid=6, compaction requested=false 2024-12-11T06:16:47,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.HRegion(2603): Flush status journal for 31fa7d87c48ac12cb7296ad21d89622a: 2024-12-11T06:16:47,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-11T06:16:47,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-11T06:16:47,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/cf/099844c333374943b7463c6c0afd7819] hfiles 2024-12-11T06:16:47,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/cf/099844c333374943b7463c6c0afd7819 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742427_1603 (size=121) 2024-12-11T06:16:47,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742427_1603 (size=121) 2024-12-11T06:16:47,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742427_1603 (size=121) 2024-12-11T06:16:47,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 
2024-12-11T06:16:47,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/b66397bb9319:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=246}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=246 2024-12-11T06:16:47,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster(4169): Remote procedure done, pid=246 2024-12-11T06:16:47,292 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:47,293 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=246, ppid=245, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:47,295 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-12-11T06:16:47,295 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a in 219 msec 2024-12-11T06:16:47,295 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-11T06:16:47,296 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-11T06:16:47,296 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-11T06:16:47,296 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-11T06:16:47,297 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T06:16:47,298 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024121197f97180b07c40b0ac32eaf578c031c6_2c757a3ba83f6787a52a17e2d540ad02, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024121167150b5267714c5581fb3b692e814b79_31fa7d87c48ac12cb7296ad21d89622a] hfiles 2024-12-11T06:16:47,298 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024121197f97180b07c40b0ac32eaf578c031c6_2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:16:47,298 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024121167150b5267714c5581fb3b692e814b79_31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:16:47,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742428_1604 (size=305) 2024-12-11T06:16:47,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742428_1604 (size=305) 2024-12-11T06:16:47,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742428_1604 (size=305) 2024-12-11T06:16:47,304 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-11T06:16:47,304 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,305 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742429_1605 (size=1007) 2024-12-11T06:16:47,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added 
to blk_1073742429_1605 (size=1007) 2024-12-11T06:16:47,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742429_1605 (size=1007) 2024-12-11T06:16:47,313 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-11T06:16:47,318 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-11T06:16:47,319 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,320 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=245, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-11T06:16:47,320 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 245 2024-12-11T06:16:47,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=245, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 257 msec 2024-12-11T06:16:47,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-12-11T06:16:47,377 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T06:16:47,377 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897807377 2024-12-11T06:16:47,377 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:42173, tgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897807377, rawTgtDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897807377, srcFsUri=hdfs://localhost:42173, srcDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:16:47,406 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:42173, inputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b 2024-12-11T06:16:47,406 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897807377, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897807377/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,408 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-11T06:16:47,411 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897807377/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:47,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742430_1606 (size=198) 2024-12-11T06:16:47,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742430_1606 (size=198) 2024-12-11T06:16:47,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742431_1607 (size=1007) 2024-12-11T06:16:47,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742431_1607 (size=1007) 2024-12-11T06:16:47,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742431_1607 (size=1007) 2024-12-11T06:16:47,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742430_1606 (size=198) 2024-12-11T06:16:47,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:47,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:47,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:48,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:16:48,055 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-11T06:16:48,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-11T06:16:48,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-11077788960376223073.jar 2024-12-11T06:16:48,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:48,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:48,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop-12644518411581889517.jar 2024-12-11T06:16:48,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:48,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:48,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:48,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:48,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:48,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-11T06:16:48,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-11T06:16:48,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-11T06:16:48,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-11T06:16:48,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-11T06:16:48,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-11T06:16:48,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-11T06:16:48,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-11T06:16:48,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-11T06:16:48,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-11T06:16:48,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-11T06:16:48,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-11T06:16:48,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-12-11T06:16:48,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:16:48,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:16:48,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:16:48,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-11T06:16:48,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:16:48,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-11T06:16:48,538 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000005/launch_container.sh] 2024-12-11T06:16:48,538 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000005/container_tokens] 2024-12-11T06:16:48,538 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000005/sysfs] 2024-12-11T06:16:48,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742432_1608 (size=131440) 2024-12-11T06:16:48,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742432_1608 (size=131440) 2024-12-11T06:16:48,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742432_1608 (size=131440) 2024-12-11T06:16:48,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742433_1609 (size=4188619) 2024-12-11T06:16:48,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742433_1609 (size=4188619) 2024-12-11T06:16:48,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742433_1609 (size=4188619) 2024-12-11T06:16:48,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742434_1610 (size=1323991) 2024-12-11T06:16:48,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742434_1610 (size=1323991) 2024-12-11T06:16:48,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742434_1610 (size=1323991) 2024-12-11T06:16:48,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742435_1611 (size=903542) 2024-12-11T06:16:48,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742435_1611 (size=903542) 2024-12-11T06:16:48,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742435_1611 (size=903542) 2024-12-11T06:16:48,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742436_1612 (size=8360360) 2024-12-11T06:16:48,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742436_1612 (size=8360360) 2024-12-11T06:16:48,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742436_1612 (size=8360360) 2024-12-11T06:16:48,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742437_1613 (size=1877034) 2024-12-11T06:16:48,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742437_1613 (size=1877034) 2024-12-11T06:16:48,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742437_1613 (size=1877034) 2024-12-11T06:16:48,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742438_1614 (size=77835) 2024-12-11T06:16:48,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742438_1614 (size=77835) 2024-12-11T06:16:48,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742438_1614 (size=77835) 2024-12-11T06:16:48,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742439_1615 (size=30949) 2024-12-11T06:16:48,626 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742439_1615 (size=30949) 2024-12-11T06:16:48,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742439_1615 (size=30949) 2024-12-11T06:16:48,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742440_1616 (size=1597213) 2024-12-11T06:16:48,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742440_1616 (size=1597213) 2024-12-11T06:16:48,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742440_1616 (size=1597213) 2024-12-11T06:16:48,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742441_1617 (size=4695811) 2024-12-11T06:16:48,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742441_1617 (size=4695811) 2024-12-11T06:16:48,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742441_1617 (size=4695811) 2024-12-11T06:16:48,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742442_1618 (size=232957) 2024-12-11T06:16:48,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742442_1618 (size=232957) 2024-12-11T06:16:48,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742442_1618 (size=232957) 2024-12-11T06:16:48,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742443_1619 (size=127628) 2024-12-11T06:16:48,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742443_1619 (size=127628) 2024-12-11T06:16:48,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742443_1619 (size=127628) 2024-12-11T06:16:48,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742444_1620 (size=20406) 2024-12-11T06:16:48,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742444_1620 (size=20406) 2024-12-11T06:16:48,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742444_1620 (size=20406) 2024-12-11T06:16:48,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742445_1621 (size=5175431) 2024-12-11T06:16:48,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742445_1621 (size=5175431) 2024-12-11T06:16:48,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742445_1621 (size=5175431) 2024-12-11T06:16:48,711 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742446_1622 (size=6426220) 2024-12-11T06:16:48,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742446_1622 (size=6426220) 2024-12-11T06:16:48,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742446_1622 (size=6426220) 2024-12-11T06:16:48,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742447_1623 (size=217634) 2024-12-11T06:16:48,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742447_1623 (size=217634) 2024-12-11T06:16:48,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742447_1623 (size=217634) 2024-12-11T06:16:48,733 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000004/launch_container.sh] 2024-12-11T06:16:48,733 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000004/container_tokens] 2024-12-11T06:16:48,733 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000004/sysfs] 2024-12-11T06:16:48,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742448_1624 (size=1832290) 2024-12-11T06:16:48,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742448_1624 (size=1832290) 2024-12-11T06:16:48,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742448_1624 (size=1832290) 2024-12-11T06:16:48,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742449_1625 (size=322274) 2024-12-11T06:16:48,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742449_1625 (size=322274) 2024-12-11T06:16:48,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742449_1625 (size=322274) 2024-12-11T06:16:48,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742450_1626 (size=443172) 
2024-12-11T06:16:48,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742450_1626 (size=443172) 2024-12-11T06:16:48,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742450_1626 (size=443172) 2024-12-11T06:16:48,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742451_1627 (size=503880) 2024-12-11T06:16:48,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742451_1627 (size=503880) 2024-12-11T06:16:48,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742451_1627 (size=503880) 2024-12-11T06:16:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742452_1628 (size=29229) 2024-12-11T06:16:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742452_1628 (size=29229) 2024-12-11T06:16:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742452_1628 (size=29229) 2024-12-11T06:16:48,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742453_1629 (size=24096) 2024-12-11T06:16:48,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742453_1629 (size=24096) 2024-12-11T06:16:48,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742453_1629 (size=24096) 2024-12-11T06:16:48,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742454_1630 (size=111872) 2024-12-11T06:16:48,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742454_1630 (size=111872) 2024-12-11T06:16:48,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742454_1630 (size=111872) 2024-12-11T06:16:48,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742455_1631 (size=45609) 2024-12-11T06:16:48,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742455_1631 (size=45609) 2024-12-11T06:16:48,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742455_1631 (size=45609) 2024-12-11T06:16:48,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742456_1632 (size=136454) 2024-12-11T06:16:48,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742456_1632 (size=136454) 2024-12-11T06:16:48,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742456_1632 (size=136454) 
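The long run of mapreduce.TableMapReduceUtil "For class ..., using jar ..." entries above, followed by the block reports for each uploaded jar, corresponds to HBase resolving the jar that ships every class the export MapReduce job needs and attaching it to the job. A minimal sketch of how a job normally picks up those dependencies follows; the job name and class are hypothetical and this is not the test's own setup code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-example");
        // Setting the job jar avoids the "No job jar file set" warning
        // that JobResourceUploader logs below.
        job.setJarByClass(DependencyJarsSketch.class);
        // Resolves the jar containing each required HBase/Hadoop class
        // (the "For class ..., using jar ..." lines) and adds it to the
        // job's classpath via the distributed cache.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }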
2024-12-11T06:16:48,791 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-11T06:16:48,792 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-11T06:16:48,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=15.0 K 2024-12-11T06:16:48,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-12-11T06:16:48,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.0 K 2024-12-11T06:16:48,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.1 K 2024-12-11T06:16:48,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742457_1633 (size=1079) 2024-12-11T06:16:48,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742457_1633 (size=1079) 2024-12-11T06:16:48,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742457_1633 (size=1079) 2024-12-11T06:16:48,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742458_1634 (size=35) 2024-12-11T06:16:48,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742458_1634 (size=35) 2024-12-11T06:16:48,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742458_1634 (size=35) 2024-12-11T06:16:48,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742459_1635 (size=304170) 2024-12-11T06:16:48,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742459_1635 (size=304170) 2024-12-11T06:16:48,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742459_1635 (size=304170) 2024-12-11T06:16:49,778 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-11T06:16:49,778 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-11T06:16:49,781 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0010_000001 (auth:SIMPLE) from 127.0.0.1:38402 2024-12-11T06:16:49,793 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000001/launch_container.sh] 2024-12-11T06:16:49,793 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000001/container_tokens] 2024-12-11T06:16:49,793 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0010/container_1733897436125_0010_01_000001/sysfs] 2024-12-11T06:16:50,433 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0011_000001 (auth:SIMPLE) from 127.0.0.1:55638 2024-12-11T06:16:50,941 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:16:55,995 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0011_000001 (auth:SIMPLE) from 127.0.0.1:42544 2024-12-11T06:16:56,298 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T06:16:56,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742460_1636 (size=349892) 2024-12-11T06:16:56,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742460_1636 (size=349892) 2024-12-11T06:16:56,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742460_1636 (size=349892) 2024-12-11T06:16:58,313 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0011_000001 (auth:SIMPLE) from 127.0.0.1:38578 2024-12-11T06:16:58,313 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0011_000001 (auth:SIMPLE) from 127.0.0.1:60222 2024-12-11T06:16:59,153 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0011_000001 (auth:SIMPLE) from 127.0.0.1:60236 2024-12-11T06:16:59,154 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0011_000001 (auth:SIMPLE) from 127.0.0.1:38594 2024-12-11T06:17:01,783 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733897436125_0011_01_000006 while processing FINISH_CONTAINERS event 2024-12-11T06:17:02,576 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0c6631449afb0062d10d83de4eb96abd, had cached 0 bytes from a total of 5735 2024-12-11T06:17:03,140 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 122c4bf679e9253b1a206af0572eb5cb, had cached 0 bytes from a total of 14463 2024-12-11T06:17:04,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742461_1637 (size=15309) 2024-12-11T06:17:04,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742461_1637 (size=15309) 2024-12-11T06:17:04,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742461_1637 (size=15309) 2024-12-11T06:17:04,610 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000002/launch_container.sh] 2024-12-11T06:17:04,611 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000002/container_tokens] 2024-12-11T06:17:04,611 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000002/sysfs] 2024-12-11T06:17:06,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742463_1639 (size=5172) 2024-12-11T06:17:06,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742463_1639 (size=5172) 2024-12-11T06:17:06,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742463_1639 (size=5172) 2024-12-11T06:17:06,544 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000005/launch_container.sh] 2024-12-11T06:17:06,544 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000005/container_tokens] 2024-12-11T06:17:06,545 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000005/sysfs] 2024-12-11T06:17:06,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742464_1640 (size=6178) 2024-12-11T06:17:06,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742464_1640 (size=6178) 2024-12-11T06:17:06,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742464_1640 (size=6178) 2024-12-11T06:17:06,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742465_1641 (size=8101) 2024-12-11T06:17:06,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742465_1641 (size=8101) 2024-12-11T06:17:06,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742465_1641 (size=8101) 2024-12-11T06:17:06,984 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000004/launch_container.sh] 
2024-12-11T06:17:06,984 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000004/container_tokens] 2024-12-11T06:17:06,984 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000004/sysfs] 2024-12-11T06:17:07,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742462_1638 (size=31804) 2024-12-11T06:17:07,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742462_1638 (size=31804) 2024-12-11T06:17:07,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742462_1638 (size=31804) 2024-12-11T06:17:07,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742466_1642 (size=477) 2024-12-11T06:17:07,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742466_1642 (size=477) 2024-12-11T06:17:07,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742466_1642 (size=477) 2024-12-11T06:17:07,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742467_1643 (size=31804) 2024-12-11T06:17:07,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742467_1643 (size=31804) 2024-12-11T06:17:07,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742467_1643 (size=31804) 2024-12-11T06:17:07,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742468_1644 (size=349892) 2024-12-11T06:17:07,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742468_1644 (size=349892) 2024-12-11T06:17:07,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742468_1644 (size=349892) 2024-12-11T06:17:09,025 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-11T06:17:09,025 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-11T06:17:09,050 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,051 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-11T06:17:09,052 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-11T06:17:09,052 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,052 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-11T06:17:09,052 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-11T06:17:09,052 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1081875296_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897807377/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897807377/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,053 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897807377/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-11T06:17:09,053 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/export-test/export-1733897807377/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-11T06:17:09,065 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=248, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-11T06:17:09,069 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897829069"}]},"ts":"1733897829069"} 2024-12-11T06:17:09,071 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-11T06:17:09,071 INFO 
[PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-11T06:17:09,072 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=248, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-11T06:17:09,074 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=31fa7d87c48ac12cb7296ad21d89622a, UNASSIGN}, {pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2c757a3ba83f6787a52a17e2d540ad02, UNASSIGN}] 2024-12-11T06:17:09,074 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2c757a3ba83f6787a52a17e2d540ad02, UNASSIGN 2024-12-11T06:17:09,074 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=31fa7d87c48ac12cb7296ad21d89622a, UNASSIGN 2024-12-11T06:17:09,075 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=2c757a3ba83f6787a52a17e2d540ad02, regionState=CLOSING, regionLocation=b66397bb9319,35939,1733897428441 2024-12-11T06:17:09,076 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=31fa7d87c48ac12cb7296ad21d89622a, regionState=CLOSING, regionLocation=b66397bb9319,42579,1733897428663 2024-12-11T06:17:09,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=251, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2c757a3ba83f6787a52a17e2d540ad02, UNASSIGN because future has completed 2024-12-11T06:17:09,077 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:17:09,077 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=252, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02, server=b66397bb9319,35939,1733897428441}] 2024-12-11T06:17:09,077 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=249, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=31fa7d87c48ac12cb7296ad21d89622a, UNASSIGN because future has completed 2024-12-11T06:17:09,078 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T06:17:09,078 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=253, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a, 
server=b66397bb9319,42579,1733897428663}] 2024-12-11T06:17:09,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-11T06:17:09,230 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(122): Close 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:17:09,230 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(122): Close 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:17:09,230 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:17:09,230 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-11T06:17:09,230 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1722): Closing 31fa7d87c48ac12cb7296ad21d89622a, disabling compactions & flushes 2024-12-11T06:17:09,230 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1722): Closing 2c757a3ba83f6787a52a17e2d540ad02, disabling compactions & flushes 2024-12-11T06:17:09,230 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 2024-12-11T06:17:09,230 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:17:09,230 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 2024-12-11T06:17:09,230 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:17:09,230 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. after waiting 0 ms 2024-12-11T06:17:09,230 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. after waiting 0 ms 2024-12-11T06:17:09,230 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 
2024-12-11T06:17:09,230 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:17:09,235 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:17:09,236 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T06:17:09,237 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:09,237 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:09,237 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02. 2024-12-11T06:17:09,237 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a. 
2024-12-11T06:17:09,237 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] regionserver.HRegion(1676): Region close journal for 2c757a3ba83f6787a52a17e2d540ad02: Waiting for close lock at 1733897829230Running coprocessor pre-close hooks at 1733897829230Disabling compacts and flushes for region at 1733897829230Disabling writes for close at 1733897829230Writing region close event to WAL at 1733897829231 (+1 ms)Running coprocessor post-close hooks at 1733897829236 (+5 ms)Closed at 1733897829237 (+1 ms) 2024-12-11T06:17:09,237 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] regionserver.HRegion(1676): Region close journal for 31fa7d87c48ac12cb7296ad21d89622a: Waiting for close lock at 1733897829230Running coprocessor pre-close hooks at 1733897829230Disabling compacts and flushes for region at 1733897829230Disabling writes for close at 1733897829230Writing region close event to WAL at 1733897829231 (+1 ms)Running coprocessor post-close hooks at 1733897829236 (+5 ms)Closed at 1733897829237 (+1 ms) 2024-12-11T06:17:09,239 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=253}] handler.UnassignRegionHandler(157): Closed 31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:17:09,240 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=250 updating hbase:meta row=31fa7d87c48ac12cb7296ad21d89622a, regionState=CLOSED 2024-12-11T06:17:09,241 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION, pid=252}] handler.UnassignRegionHandler(157): Closed 2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:17:09,241 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=251 updating hbase:meta row=2c757a3ba83f6787a52a17e2d540ad02, regionState=CLOSED 2024-12-11T06:17:09,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=253, ppid=250, state=RUNNABLE, hasLock=false; CloseRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a, server=b66397bb9319,42579,1733897428663 because future has completed 2024-12-11T06:17:09,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=252, ppid=251, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02, server=b66397bb9319,35939,1733897428441 because future has completed 2024-12-11T06:17:09,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=253, resume processing ppid=250 2024-12-11T06:17:09,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=253, ppid=250, state=SUCCESS, hasLock=false; CloseRegionProcedure 31fa7d87c48ac12cb7296ad21d89622a, server=b66397bb9319,42579,1733897428663 in 165 msec 2024-12-11T06:17:09,247 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=252, resume processing ppid=251 2024-12-11T06:17:09,247 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=252, ppid=251, state=SUCCESS, hasLock=false; CloseRegionProcedure 2c757a3ba83f6787a52a17e2d540ad02, server=b66397bb9319,35939,1733897428441 in 167 msec 2024-12-11T06:17:09,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=31fa7d87c48ac12cb7296ad21d89622a, UNASSIGN in 173 msec 2024-12-11T06:17:09,249 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=251, resume processing ppid=249 2024-12-11T06:17:09,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, ppid=249, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2c757a3ba83f6787a52a17e2d540ad02, UNASSIGN in 173 msec 2024-12-11T06:17:09,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=248 2024-12-11T06:17:09,251 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=248, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 178 msec 2024-12-11T06:17:09,253 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733897829253"}]},"ts":"1733897829253"} 2024-12-11T06:17:09,254 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-11T06:17:09,254 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-11T06:17:09,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 190 msec 2024-12-11T06:17:09,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-11T06:17:09,387 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T06:17:09,388 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] procedure2.ProcedureExecutor(1139): Stored pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,390 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=254, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,391 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=254, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,394 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44539 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,394 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(133): ARCHIVING 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:17:09,394 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:17:09,396 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/recovered.edits] 2024-12-11T06:17:09,396 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/cf, FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/recovered.edits] 2024-12-11T06:17:09,399 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/cf/f381f24f48914e51a951ba644b3dc05e to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/cf/f381f24f48914e51a951ba644b3dc05e 2024-12-11T06:17:09,400 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/cf/099844c333374943b7463c6c0afd7819 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/cf/099844c333374943b7463c6c0afd7819 2024-12-11T06:17:09,402 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02/recovered.edits/9.seqid 2024-12-11T06:17:09,403 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/recovered.edits/9.seqid to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a/recovered.edits/9.seqid 2024-12-11T06:17:09,403 DEBUG [HFileArchiver-20 {}] 
backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:17:09,403 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testtb-testExportFileSystemStateWithSkipTmp/31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:17:09,403 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-11T06:17:09,404 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-11T06:17:09,405 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-12-11T06:17:09,408 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024121197f97180b07c40b0ac32eaf578c031c6_2c757a3ba83f6787a52a17e2d540ad02 to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b2024121197f97180b07c40b0ac32eaf578c031c6_2c757a3ba83f6787a52a17e2d540ad02 2024-12-11T06:17:09,409 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024121167150b5267714c5581fb3b692e814b79_31fa7d87c48ac12cb7296ad21d89622a to hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e2024121167150b5267714c5581fb3b692e814b79_31fa7d87c48ac12cb7296ad21d89622a 2024-12-11T06:17:09,409 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-11T06:17:09,411 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=254, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,414 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-11T06:17:09,417 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 
2024-12-11T06:17:09,418 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=254, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,418 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-11T06:17:09,418 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897829418"}]},"ts":"9223372036854775807"} 2024-12-11T06:17:09,418 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733897829418"}]},"ts":"9223372036854775807"} 2024-12-11T06:17:09,421 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-11T06:17:09,421 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 31fa7d87c48ac12cb7296ad21d89622a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733897805725.31fa7d87c48ac12cb7296ad21d89622a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 2c757a3ba83f6787a52a17e2d540ad02, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733897805725.2c757a3ba83f6787a52a17e2d540ad02.', STARTKEY => '1', ENDKEY => ''}] 2024-12-11T06:17:09,421 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-12-11T06:17:09,421 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733897829421"}]},"ts":"9223372036854775807"} 2024-12-11T06:17:09,423 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-11T06:17:09,424 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=254, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=254, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 36 msec 2024-12-11T06:17:09,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,554 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,555 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T06:17:09,555 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T06:17:09,555 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T06:17:09,556 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-11T06:17:09,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:17:09,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:17:09,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:17:09,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-11T06:17:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=254 2024-12-11T06:17:09,631 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for 
testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,632 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-11T06:17:09,632 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:17:09,632 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:17:09,632 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:17:09,632 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-11T06:17:09,640 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-11T06:17:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,643 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-11T06:17:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:09,674 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=826 (was 819) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1448614641_1 at /127.0.0.1:35844 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:40016 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-9832 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) 
java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (1538188378) connection to localhost/127.0.0.1:38683 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:57828 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1448614641_1 at /127.0.0.1:40002 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 1135) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38683 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1081875296_22 at /127.0.0.1:35874 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=795 (was 805), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1054 (was 971) - SystemLoadAverage LEAK? 
-, ProcessCount=24 (was 24), AvailableMemoryMB=760 (was 920) 2024-12-11T06:17:09,674 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=826 is superior to 500 2024-12-11T06:17:09,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-12-11T06:17:09,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@41960f37{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T06:17:09,684 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@762aa3c9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T06:17:09,684 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T06:17:09,684 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69d482a5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T06:17:09,684 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18f5cdc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,STOPPED} 2024-12-11T06:17:12,212 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000003/launch_container.sh] 2024-12-11T06:17:12,212 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000003/container_tokens] 2024-12-11T06:17:12,213 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000003/sysfs] 2024-12-11T06:17:13,258 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733897436125_0011_000001 (auth:SIMPLE) from 127.0.0.1:56694 2024-12-11T06:17:13,270 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000001/launch_container.sh] 2024-12-11T06:17:13,270 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000001/container_tokens] 2024-12-11T06:17:13,270 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_344871687/yarn-2142430691/MiniMRCluster_344871687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733897436125_0011/container_1733897436125_0011_01_000001/sysfs] 2024-12-11T06:17:14,754 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:17:18,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-11T06:17:23,557 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:17:23,935 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 84d6f5deb75d828a55d48a410e730336, had cached 0 bytes from a total of 6086 2024-12-11T06:17:26,298 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T06:17:26,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ae6f868{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-11T06:17:26,697 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7310eace{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T06:17:26,697 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T06:17:26,697 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ea861f3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T06:17:26,697 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@310cee80{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,STOPPED} 2024-12-11T06:17:43,704 ERROR [Thread[Thread-391,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-11T06:17:43,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d311af2{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-11T06:17:43,705 INFO [Time-limited test 
{}] server.AbstractConnector(383): Stopped ServerConnector@28824225{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T06:17:43,705 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T06:17:43,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@293bf43a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T06:17:43,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4096812e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,STOPPED} 2024-12-11T06:17:43,709 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-11T06:17:43,714 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-11T06:17:43,714 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-11T06:17:43,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741830_1006 (size=1185566) 2024-12-11T06:17:43,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741830_1006 (size=1185566) 2024-12-11T06:17:43,722 ERROR [Thread[Thread-418,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-11T06:17:43,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@418de80b{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-11T06:17:43,726 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@581cb918{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T06:17:43,726 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T06:17:43,726 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a8e0d74{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-11T06:17:43,726 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6109c21b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,STOPPED} 2024-12-11T06:17:43,727 ERROR [Thread[Thread-373,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-11T06:17:43,727 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-11T06:17:43,727 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T06:17:43,727 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T06:17:43,728 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T06:17:43,728 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:17:43,728 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:17:43,728 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-11T06:17:43,728 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T06:17:43,728 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1239213299, stopped=false 2024-12-11T06:17:43,729 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:43,729 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-11T06:17:43,729 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=b66397bb9319,39063,1733897427482 2024-12-11T06:17:43,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T06:17:43,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T06:17:43,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T06:17:43,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:17:43,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:17:43,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:17:43,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T06:17:43,730 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:17:43,730 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T06:17:43,731 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-11T06:17:43,731 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T06:17:43,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:17:43,731 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T06:17:43,731 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b66397bb9319,35939,1733897428441' ***** 2024-12-11T06:17:43,732 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T06:17:43,732 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T06:17:43,732 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:43,732 INFO [Time-limited 
test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T06:17:43,733 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b66397bb9319,44539,1733897428597' ***** 2024-12-11T06:17:43,733 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:43,733 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T06:17:43,733 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'b66397bb9319,42579,1733897428663' ***** 2024-12-11T06:17:43,733 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:43,733 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T06:17:43,733 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T06:17:43,733 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T06:17:43,733 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T06:17:43,733 INFO [RS:1;b66397bb9319:44539 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T06:17:43,733 INFO [RS:2;b66397bb9319:42579 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T06:17:43,733 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T06:17:43,733 INFO [RS:2;b66397bb9319:42579 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T06:17:43,733 INFO [RS:1;b66397bb9319:44539 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T06:17:43,734 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(3091): Received CLOSE for 0c6631449afb0062d10d83de4eb96abd 2024-12-11T06:17:43,734 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(3091): Received CLOSE for 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:17:43,734 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T06:17:43,734 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(3091): Received CLOSE for 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:17:43,734 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(959): stopping server b66397bb9319,42579,1733897428663 2024-12-11T06:17:43,734 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(959): stopping server b66397bb9319,44539,1733897428597 2024-12-11T06:17:43,734 INFO [RS:2;b66397bb9319:42579 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T06:17:43,734 INFO [RS:1;b66397bb9319:44539 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T06:17:43,734 INFO [RS:2;b66397bb9319:42579 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;b66397bb9319:42579. 2024-12-11T06:17:43,734 INFO [RS:1;b66397bb9319:44539 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;b66397bb9319:44539. 
2024-12-11T06:17:43,734 DEBUG [RS:2;b66397bb9319:42579 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T06:17:43,734 DEBUG [RS:2;b66397bb9319:42579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:17:43,734 DEBUG [RS:1;b66397bb9319:44539 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T06:17:43,734 DEBUG [RS:1;b66397bb9319:44539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:17:43,734 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T06:17:43,734 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-11T06:17:43,734 DEBUG [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(1325): Online Regions={84d6f5deb75d828a55d48a410e730336=testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336.} 2024-12-11T06:17:43,734 DEBUG 
[RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(1325): Online Regions={0c6631449afb0062d10d83de4eb96abd=hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., 122c4bf679e9253b1a206af0572eb5cb=testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb.} 2024-12-11T06:17:43,734 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0c6631449afb0062d10d83de4eb96abd, disabling compactions & flushes 2024-12-11T06:17:43,734 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 84d6f5deb75d828a55d48a410e730336, disabling compactions & flushes 2024-12-11T06:17:43,734 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:17:43,734 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 2024-12-11T06:17:43,734 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:17:43,734 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 2024-12-11T06:17:43,735 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. after waiting 0 ms 2024-12-11T06:17:43,735 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. after waiting 0 ms 2024-12-11T06:17:43,735 DEBUG [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(1351): Waiting on 84d6f5deb75d828a55d48a410e730336 2024-12-11T06:17:43,735 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:17:43,735 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 
2024-12-11T06:17:43,735 DEBUG [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(1351): Waiting on 0c6631449afb0062d10d83de4eb96abd, 122c4bf679e9253b1a206af0572eb5cb 2024-12-11T06:17:43,735 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 0c6631449afb0062d10d83de4eb96abd 1/1 column families, dataSize=310 B heapSize=944 B 2024-12-11T06:17:43,739 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/84d6f5deb75d828a55d48a410e730336/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T06:17:43,739 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:43,740 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 2024-12-11T06:17:43,740 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 84d6f5deb75d828a55d48a410e730336: Waiting for close lock at 1733897863734Running coprocessor pre-close hooks at 1733897863734Disabling compacts and flushes for region at 1733897863734Disabling writes for close at 1733897863735 (+1 ms)Writing region close event to WAL at 1733897863736 (+1 ms)Running coprocessor post-close hooks at 1733897863739 (+3 ms)Closed at 1733897863740 (+1 ms) 2024-12-11T06:17:43,740 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733897708598.84d6f5deb75d828a55d48a410e730336. 2024-12-11T06:17:43,740 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T06:17:43,740 INFO [RS:0;b66397bb9319:35939 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T06:17:43,740 INFO [RS:0;b66397bb9319:35939 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T06:17:43,740 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(959): stopping server b66397bb9319,35939,1733897428441 2024-12-11T06:17:43,740 INFO [RS:0;b66397bb9319:35939 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T06:17:43,740 INFO [RS:0;b66397bb9319:35939 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;b66397bb9319:35939. 
2024-12-11T06:17:43,740 DEBUG [RS:0;b66397bb9319:35939 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T06:17:43,740 DEBUG [RS:0;b66397bb9319:35939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:17:43,741 INFO [RS:0;b66397bb9319:35939 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T06:17:43,741 INFO [RS:0;b66397bb9319:35939 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T06:17:43,741 INFO [RS:0;b66397bb9319:35939 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-11T06:17:43,741 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T06:17:43,741 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-11T06:17:43,741 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T06:17:43,741 DEBUG [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-11T06:17:43,741 DEBUG [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T06:17:43,741 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T06:17:43,741 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T06:17:43,741 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T06:17:43,741 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T06:17:43,741 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T06:17:43,742 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=88.80 KB heapSize=140.59 KB 2024-12-11T06:17:43,744 INFO [regionserver/b66397bb9319:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T06:17:43,754 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/.tmp/l/ec6d2ae826ee417ca4a34b460f7877d8 is 68, key is testtb-testExportFileSystemStateWithSkipTmp/l:/1733897829391/DeleteFamily/seqid=0 2024-12-11T06:17:43,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742469_1645 (size=5193) 2024-12-11T06:17:43,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742469_1645 (size=5193) 2024-12-11T06:17:43,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742469_1645 (size=5193) 2024-12-11T06:17:43,760 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=310 B at sequenceid=37 (bloomFilter=false), to=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/.tmp/l/ec6d2ae826ee417ca4a34b460f7877d8 2024-12-11T06:17:43,765 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ec6d2ae826ee417ca4a34b460f7877d8 2024-12-11T06:17:43,765 DEBUG 
[RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/.tmp/l/ec6d2ae826ee417ca4a34b460f7877d8 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/l/ec6d2ae826ee417ca4a34b460f7877d8 2024-12-11T06:17:43,770 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for ec6d2ae826ee417ca4a34b460f7877d8 2024-12-11T06:17:43,770 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/l/ec6d2ae826ee417ca4a34b460f7877d8, entries=3, sequenceid=37, filesize=5.1 K 2024-12-11T06:17:43,771 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/info/3cfac3f91a2c4f79bbd3e9601a8dfebc is 173, key is testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb./info:regioninfo/1733897733164/Put/seqid=0 2024-12-11T06:17:43,771 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~310 B/310, heapSize ~928 B/928, currentSize=0 B/0 for 0c6631449afb0062d10d83de4eb96abd in 36ms, sequenceid=37, compaction requested=false 2024-12-11T06:17:43,771 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-11T06:17:43,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742470_1646 (size=16856) 2024-12-11T06:17:43,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742470_1646 (size=16856) 2024-12-11T06:17:43,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742470_1646 (size=16856) 2024-12-11T06:17:43,780 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/acl/0c6631449afb0062d10d83de4eb96abd/recovered.edits/40.seqid, newMaxSeqId=40, maxSeqId=29 2024-12-11T06:17:43,780 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=75.79 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/info/3cfac3f91a2c4f79bbd3e9601a8dfebc 2024-12-11T06:17:43,780 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:43,780 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:17:43,780 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0c6631449afb0062d10d83de4eb96abd: Waiting for close lock at 1733897863734Running coprocessor pre-close hooks at 1733897863734Disabling compacts and flushes for region at 1733897863734Disabling writes for close at 1733897863735 (+1 ms)Obtaining lock to block concurrent updates at 1733897863735Preparing flush snapshotting stores in 0c6631449afb0062d10d83de4eb96abd at 1733897863735Finished memstore snapshotting hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd., syncing WAL and waiting on mvcc, flushsize=dataSize=310, getHeapSize=928, getOffHeapSize=0, getCellsCount=5 at 1733897863735Flushing stores of hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. at 1733897863736 (+1 ms)Flushing 0c6631449afb0062d10d83de4eb96abd/l: creating writer at 1733897863736Flushing 0c6631449afb0062d10d83de4eb96abd/l: appending metadata at 1733897863753 (+17 ms)Flushing 0c6631449afb0062d10d83de4eb96abd/l: closing flushed file at 1733897863753Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@36a00a22: reopening flushed file at 1733897863765 (+12 ms)Finished flush of dataSize ~310 B/310, heapSize ~928 B/928, currentSize=0 B/0 for 0c6631449afb0062d10d83de4eb96abd in 36ms, sequenceid=37, compaction requested=false at 1733897863771 (+6 ms)Writing region close event to WAL at 1733897863776 (+5 ms)Running coprocessor post-close hooks at 1733897863780 (+4 ms)Closed at 1733897863780 2024-12-11T06:17:43,781 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733897431962.0c6631449afb0062d10d83de4eb96abd. 2024-12-11T06:17:43,781 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 122c4bf679e9253b1a206af0572eb5cb, disabling compactions & flushes 2024-12-11T06:17:43,781 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:17:43,781 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:17:43,781 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. after waiting 0 ms 2024-12-11T06:17:43,781 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 
2024-12-11T06:17:43,784 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/default/testExportExpiredSnapshot/122c4bf679e9253b1a206af0572eb5cb/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=8 2024-12-11T06:17:43,785 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:43,785 INFO [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:17:43,785 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 122c4bf679e9253b1a206af0572eb5cb: Waiting for close lock at 1733897863781Running coprocessor pre-close hooks at 1733897863781Disabling compacts and flushes for region at 1733897863781Disabling writes for close at 1733897863781Writing region close event to WAL at 1733897863782 (+1 ms)Running coprocessor post-close hooks at 1733897863785 (+3 ms)Closed at 1733897863785 2024-12-11T06:17:43,785 DEBUG [RS_CLOSE_REGION-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733897708598.122c4bf679e9253b1a206af0572eb5cb. 2024-12-11T06:17:43,799 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/ns/239468dfa87045f9b01f5980435c76ea is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e./ns:/1733897705788/DeleteFamily/seqid=0 2024-12-11T06:17:43,800 INFO [regionserver/b66397bb9319:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T06:17:43,800 INFO [regionserver/b66397bb9319:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T06:17:43,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742471_1647 (size=8378) 2024-12-11T06:17:43,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742471_1647 (size=8378) 2024-12-11T06:17:43,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742471_1647 (size=8378) 2024-12-11T06:17:43,805 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/ns/239468dfa87045f9b01f5980435c76ea 2024-12-11T06:17:43,830 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/rep_barrier/b2f59225c44349589a29189386e2e2fb is 133, key is 
testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e./rep_barrier:/1733897705788/DeleteFamily/seqid=0 2024-12-11T06:17:43,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742472_1648 (size=8717) 2024-12-11T06:17:43,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742472_1648 (size=8717) 2024-12-11T06:17:43,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742472_1648 (size=8717) 2024-12-11T06:17:43,836 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.95 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/rep_barrier/b2f59225c44349589a29189386e2e2fb 2024-12-11T06:17:43,855 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/table/8fa885ddf14242afafa5a9aa9c2ef067 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733897686003.366ea7dc0cdd14943ffc9e04b7ea738e./table:/1733897705788/DeleteFamily/seqid=0 2024-12-11T06:17:43,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742473_1649 (size=9531) 2024-12-11T06:17:43,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742473_1649 (size=9531) 2024-12-11T06:17:43,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742473_1649 (size=9531) 2024-12-11T06:17:43,862 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.27 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/table/8fa885ddf14242afafa5a9aa9c2ef067 2024-12-11T06:17:43,866 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/info/3cfac3f91a2c4f79bbd3e9601a8dfebc as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/info/3cfac3f91a2c4f79bbd3e9601a8dfebc 2024-12-11T06:17:43,870 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/info/3cfac3f91a2c4f79bbd3e9601a8dfebc, entries=94, sequenceid=244, filesize=16.5 K 2024-12-11T06:17:43,871 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/ns/239468dfa87045f9b01f5980435c76ea as 
hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/ns/239468dfa87045f9b01f5980435c76ea 2024-12-11T06:17:43,874 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/ns/239468dfa87045f9b01f5980435c76ea, entries=28, sequenceid=244, filesize=8.2 K 2024-12-11T06:17:43,875 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/rep_barrier/b2f59225c44349589a29189386e2e2fb as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/rep_barrier/b2f59225c44349589a29189386e2e2fb 2024-12-11T06:17:43,878 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/rep_barrier/b2f59225c44349589a29189386e2e2fb, entries=26, sequenceid=244, filesize=8.5 K 2024-12-11T06:17:43,879 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/.tmp/table/8fa885ddf14242afafa5a9aa9c2ef067 as hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/table/8fa885ddf14242afafa5a9aa9c2ef067 2024-12-11T06:17:43,882 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/table/8fa885ddf14242afafa5a9aa9c2ef067, entries=43, sequenceid=244, filesize=9.3 K 2024-12-11T06:17:43,883 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~88.80 KB/90934, heapSize ~140.53 KB/143904, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=244, compaction requested=false 2024-12-11T06:17:43,892 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/data/hbase/meta/1588230740/recovered.edits/247.seqid, newMaxSeqId=247, maxSeqId=1 2024-12-11T06:17:43,893 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:43,893 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T06:17:43,893 INFO [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T06:17:43,893 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733897863741Running coprocessor pre-close hooks 
at 1733897863741Disabling compacts and flushes for region at 1733897863741Disabling writes for close at 1733897863741Obtaining lock to block concurrent updates at 1733897863742 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733897863742Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=90934, getHeapSize=143904, getOffHeapSize=0, getCellsCount=689 at 1733897863742Flushing stores of hbase:meta,,1.1588230740 at 1733897863742Flushing 1588230740/info: creating writer at 1733897863742Flushing 1588230740/info: appending metadata at 1733897863770 (+28 ms)Flushing 1588230740/info: closing flushed file at 1733897863770Flushing 1588230740/ns: creating writer at 1733897863784 (+14 ms)Flushing 1588230740/ns: appending metadata at 1733897863798 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733897863798Flushing 1588230740/rep_barrier: creating writer at 1733897863809 (+11 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733897863830 (+21 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733897863830Flushing 1588230740/table: creating writer at 1733897863840 (+10 ms)Flushing 1588230740/table: appending metadata at 1733897863854 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733897863854Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3950af10: reopening flushed file at 1733897863865 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@55cc2119: reopening flushed file at 1733897863870 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ee4edf2: reopening flushed file at 1733897863874 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d20a014: reopening flushed file at 1733897863878 (+4 ms)Finished flush of dataSize ~88.80 KB/90934, heapSize ~140.53 KB/143904, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=244, compaction requested=false at 1733897863883 (+5 ms)Writing region close event to WAL at 1733897863890 (+7 ms)Running coprocessor post-close hooks at 1733897863892 (+2 ms)Closed at 1733897863893 (+1 ms) 2024-12-11T06:17:43,893 DEBUG [RS_CLOSE_META-regionserver/b66397bb9319:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T06:17:43,935 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(976): stopping server b66397bb9319,42579,1733897428663; all regions closed. 2024-12-11T06:17:43,935 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(976): stopping server b66397bb9319,44539,1733897428597; all regions closed. 2024-12-11T06:17:43,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741834_1010 (size=10426) 2024-12-11T06:17:43,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741835_1011 (size=16278) 2024-12-11T06:17:43,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741834_1010 (size=10426) 2024-12-11T06:17:43,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741835_1011 (size=16278) 2024-12-11T06:17:43,941 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(976): stopping server b66397bb9319,35939,1733897428441; all regions closed. 
2024-12-11T06:17:43,942 DEBUG [RS:2;b66397bb9319:42579 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/oldWALs 2024-12-11T06:17:43,942 DEBUG [RS:1;b66397bb9319:44539 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/oldWALs 2024-12-11T06:17:43,942 INFO [RS:1;b66397bb9319:44539 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b66397bb9319%2C44539%2C1733897428597:(num 1733897430866) 2024-12-11T06:17:43,942 INFO [RS:2;b66397bb9319:42579 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b66397bb9319%2C42579%2C1733897428663:(num 1733897430866) 2024-12-11T06:17:43,942 DEBUG [RS:2;b66397bb9319:42579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:17:43,942 DEBUG [RS:1;b66397bb9319:44539 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:17:43,942 INFO [RS:2;b66397bb9319:42579 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T06:17:43,942 INFO [RS:1;b66397bb9319:44539 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T06:17:43,942 INFO [RS:2;b66397bb9319:42579 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T06:17:43,942 INFO [RS:1;b66397bb9319:44539 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T06:17:43,942 INFO [RS:1;b66397bb9319:44539 {}] hbase.ChoreService(370): Chore service for: regionserver/b66397bb9319:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T06:17:43,942 INFO [RS:2;b66397bb9319:42579 {}] hbase.ChoreService(370): Chore service for: regionserver/b66397bb9319:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T06:17:43,943 INFO [RS:2;b66397bb9319:42579 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T06:17:43,943 INFO [RS:1;b66397bb9319:44539 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T06:17:43,943 INFO [RS:2;b66397bb9319:42579 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T06:17:43,943 INFO [RS:1;b66397bb9319:44539 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T06:17:43,943 INFO [RS:2;b66397bb9319:42579 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T06:17:43,943 INFO [RS:1;b66397bb9319:44539 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T06:17:43,943 INFO [regionserver/b66397bb9319:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T06:17:43,943 INFO [RS:2;b66397bb9319:42579 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T06:17:43,943 INFO [RS:1;b66397bb9319:44539 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T06:17:43,943 INFO [RS:2;b66397bb9319:42579 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42579 2024-12-11T06:17:43,943 INFO [RS:1;b66397bb9319:44539 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44539 2024-12-11T06:17:43,944 INFO [regionserver/b66397bb9319:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T06:17:43,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741836_1012 (size=103552) 2024-12-11T06:17:43,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741836_1012 (size=103552) 2024-12-11T06:17:43,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741836_1012 (size=103552) 2024-12-11T06:17:43,948 DEBUG [RS:0;b66397bb9319:35939 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/oldWALs 2024-12-11T06:17:43,948 INFO [RS:0;b66397bb9319:35939 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b66397bb9319%2C35939%2C1733897428441.meta:.meta(num 1733897431269) 2024-12-11T06:17:43,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073741833_1009 (size=18115) 2024-12-11T06:17:43,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073741833_1009 (size=18115) 2024-12-11T06:17:43,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b66397bb9319,44539,1733897428597 2024-12-11T06:17:43,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T06:17:43,952 INFO [RS:1;b66397bb9319:44539 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T06:17:43,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b66397bb9319,42579,1733897428663 2024-12-11T06:17:43,952 INFO [RS:2;b66397bb9319:42579 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T06:17:43,954 DEBUG [RS:0;b66397bb9319:35939 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/oldWALs 2024-12-11T06:17:43,954 INFO [RS:0;b66397bb9319:35939 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL b66397bb9319%2C35939%2C1733897428441:(num 1733897430866) 2024-12-11T06:17:43,954 DEBUG [RS:0;b66397bb9319:35939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T06:17:43,954 INFO [RS:0;b66397bb9319:35939 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T06:17:43,954 INFO [RS:0;b66397bb9319:35939 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T06:17:43,955 INFO 
[RS:0;b66397bb9319:35939 {}] hbase.ChoreService(370): Chore service for: regionserver/b66397bb9319:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T06:17:43,955 INFO [RS:0;b66397bb9319:35939 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T06:17:43,955 INFO [regionserver/b66397bb9319:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T06:17:43,955 INFO [RS:0;b66397bb9319:35939 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35939 2024-12-11T06:17:43,956 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b66397bb9319,42579,1733897428663] 2024-12-11T06:17:43,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/b66397bb9319,35939,1733897428441 2024-12-11T06:17:43,957 INFO [RS:0;b66397bb9319:35939 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T06:17:43,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T06:17:43,958 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b66397bb9319,42579,1733897428663 already deleted, retry=false 2024-12-11T06:17:43,958 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b66397bb9319,42579,1733897428663 expired; onlineServers=2 2024-12-11T06:17:43,958 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b66397bb9319,44539,1733897428597] 2024-12-11T06:17:43,959 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b66397bb9319,44539,1733897428597 already deleted, retry=false 2024-12-11T06:17:43,959 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b66397bb9319,44539,1733897428597 expired; onlineServers=1 2024-12-11T06:17:43,960 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [b66397bb9319,35939,1733897428441] 2024-12-11T06:17:43,961 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/b66397bb9319,35939,1733897428441 already deleted, retry=false 2024-12-11T06:17:43,961 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; b66397bb9319,35939,1733897428441 expired; onlineServers=0 2024-12-11T06:17:43,961 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'b66397bb9319,39063,1733897427482' ***** 2024-12-11T06:17:43,961 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T06:17:43,961 INFO [M:0;b66397bb9319:39063 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T06:17:43,961 INFO [M:0;b66397bb9319:39063 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T06:17:43,961 DEBUG [M:0;b66397bb9319:39063 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T06:17:43,962 DEBUG 
[M:0;b66397bb9319:39063 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T06:17:43,962 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-11T06:17:43,962 DEBUG [master/b66397bb9319:0:becomeActiveMaster-HFileCleaner.small.0-1733897430448 {}] cleaner.HFileCleaner(306): Exit Thread[master/b66397bb9319:0:becomeActiveMaster-HFileCleaner.small.0-1733897430448,5,FailOnTimeoutGroup] 2024-12-11T06:17:43,962 DEBUG [master/b66397bb9319:0:becomeActiveMaster-HFileCleaner.large.0-1733897430445 {}] cleaner.HFileCleaner(306): Exit Thread[master/b66397bb9319:0:becomeActiveMaster-HFileCleaner.large.0-1733897430445,5,FailOnTimeoutGroup] 2024-12-11T06:17:43,962 INFO [M:0;b66397bb9319:39063 {}] hbase.ChoreService(370): Chore service for: master/b66397bb9319:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T06:17:43,962 INFO [M:0;b66397bb9319:39063 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T06:17:43,962 DEBUG [M:0;b66397bb9319:39063 {}] master.HMaster(1795): Stopping service threads 2024-12-11T06:17:43,962 INFO [M:0;b66397bb9319:39063 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T06:17:43,962 INFO [M:0;b66397bb9319:39063 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T06:17:43,963 INFO [M:0;b66397bb9319:39063 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T06:17:43,963 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-11T06:17:43,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T06:17:43,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T06:17:43,964 DEBUG [M:0;b66397bb9319:39063 {}] zookeeper.ZKUtil(347): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T06:17:43,964 WARN [M:0;b66397bb9319:39063 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T06:17:43,965 INFO [M:0;b66397bb9319:39063 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/.lastflushedseqids 2024-12-11T06:17:43,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36519 is added to blk_1073742474_1650 (size=329) 2024-12-11T06:17:43,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33445 is added to blk_1073742474_1650 (size=329) 2024-12-11T06:17:43,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073742474_1650 (size=329) 2024-12-11T06:17:43,979 INFO [M:0;b66397bb9319:39063 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T06:17:43,979 INFO [M:0;b66397bb9319:39063 {}] 
region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-11T06:17:43,979 DEBUG [M:0;b66397bb9319:39063 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-11T06:17:43,996 INFO [M:0;b66397bb9319:39063 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-11T06:17:43,996 DEBUG [M:0;b66397bb9319:39063 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-11T06:17:43,996 DEBUG [M:0;b66397bb9319:39063 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-11T06:17:43,996 DEBUG [M:0;b66397bb9319:39063 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-11T06:17:43,997 INFO [M:0;b66397bb9319:39063 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=1008.69 KB heapSize=1.18 MB
2024-12-11T06:17:43,997 ERROR [AsyncFSWAL-0-hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData-prefix:b66397bb9319,39063,1733897427482 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData-prefix:b66397bb9319,39063,1733897427482,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T06:17:44,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T06:17:44,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T06:17:44,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42579-0x1007fb2ceb60003, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T06:17:44,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44539-0x1007fb2ceb60002, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T06:17:44,057 INFO [RS:2;b66397bb9319:42579 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T06:17:44,057 INFO [RS:1;b66397bb9319:44539 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T06:17:44,057 INFO [RS:1;b66397bb9319:44539 {}] regionserver.HRegionServer(1031): Exiting; stopping=b66397bb9319,44539,1733897428597; zookeeper connection closed. 2024-12-11T06:17:44,057 INFO [RS:2;b66397bb9319:42579 {}] regionserver.HRegionServer(1031): Exiting; stopping=b66397bb9319,42579,1733897428663; zookeeper connection closed. 2024-12-11T06:17:44,057 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3ce89f0e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3ce89f0e 2024-12-11T06:17:44,057 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3efee97d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3efee97d 2024-12-11T06:17:44,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T06:17:44,059 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35939-0x1007fb2ceb60001, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T06:17:44,059 INFO [RS:0;b66397bb9319:35939 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T06:17:44,059 INFO [RS:0;b66397bb9319:35939 {}] regionserver.HRegionServer(1031): Exiting; stopping=b66397bb9319,35939,1733897428441; zookeeper connection closed. 
2024-12-11T06:17:44,059 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3c71a8c0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3c71a8c0 2024-12-11T06:17:44,059 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T06:17:46,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741835_1011 (size=16278) 2024-12-11T06:17:46,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741830_1006 (size=1185566) 2024-12-11T06:17:46,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741833_1009 (size=18115) 2024-12-11T06:17:46,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37875 is added to blk_1073741834_1010 (size=10426) 2024-12-11T06:17:48,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:48,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T06:17:48,055 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-11T06:17:48,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-11T06:17:48,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-11T06:17:48,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:48,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-11T06:17:48,056 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-11T06:17:49,254 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-11T06:17:56,298 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T06:18:26,298 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b66397bb9319:39063 241 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 48 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@30c28534 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@810e6d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5032 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 51 Waiting on java.util.concurrent.CountDownLatch$Sync@3ee7654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11835 Waited count: 12691 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 14 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@207a85f3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2fd3d943 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a743795): State: TIMED_WAITING Blocked count: 0 Waited count: 1001 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 101 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1874862952-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1874862952-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1874862952-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1874862952-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1874862952-41): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1874862952-42-acceptor-0@173365be-ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:34241}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1874862952-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1874862952-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4c8643e2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 41 Waited count: 3686 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c283e82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 42173): State: TIMED_WAITING Blocked count: 1 Waited 
count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3b857160): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 168 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@57b0ae63): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 6 Waited count: 168 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 49235 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1843 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e2a97bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 42173): State: TIMED_WAITING Blocked count: 80 Waited count: 2709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 42173): State: TIMED_WAITING Blocked count: 78 Waited count: 2703 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 42173): State: TIMED_WAITING Blocked count: 74 Waited count: 2707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 42173): State: TIMED_WAITING Blocked count: 50 Waited count: 2720 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 42173): State: TIMED_WAITING Blocked count: 73 Waited count: 2714 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5a1288c7): State: TIMED_WAITING Blocked count: 0 Waited count: 250 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@237c9bcb): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2f91011b): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@595285c4): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(294475220)): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp301817503-87): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp301817503-88-acceptor-0@7cc1b682-ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:46863}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp301817503-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp301817503-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-338e7c55-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
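The per-thread layout used throughout this dump (thread id and name, State, Blocked count, Waited count, Stack) maps directly onto what java.lang.management.ThreadMXBean exposes. As a hedged illustration only, and not the actual utility that produced the listing above, a minimal self-contained sketch that prints live threads in a similar shape could look like this:

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    public class ThreadDumpSketch {
        public static void main(String[] args) {
            ThreadMXBean mx = ManagementFactory.getThreadMXBean();
            // 64 is an arbitrary maximum stack depth chosen for this sketch.
            for (ThreadInfo info : mx.getThreadInfo(mx.getAllThreadIds(), 64)) {
                if (info == null) {
                    continue; // thread exited between the two MXBean calls
                }
                System.out.println("Thread " + info.getThreadId()
                    + " (" + info.getThreadName() + "):");
                System.out.println("  State: " + info.getThreadState());
                System.out.println("  Blocked count: " + info.getBlockedCount());
                System.out.println("  Waited count: " + info.getWaitedCount());
                if (info.getLockName() != null) {
                    System.out.println("  Waiting on " + info.getLockName());
                }
                System.out.println("  Stack:");
                for (StackTraceElement frame : info.getStackTrace()) {
                    System.out.println("    " + frame);
                }
            }
        }
    }

Note that ThreadInfo.getBlockedCount() and getWaitedCount() are cumulative since thread start, which is why long-lived periodic threads in this dump (metrics updaters, IPC handlers, monitors) show steadily growing waited counts.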
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@222c8e79): State: TIMED_WAITING Blocked count: 0 Waited count: 998 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38485): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 2 Waited count: 338 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f736061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1491 Waited count: 1648 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e58ecbb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
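Most of the TIMED_WAITING handler threads in this dump share one idle pattern: a timed poll on an empty queue, parked in LockSupport.parkNanos beneath LinkedBlockingQueue.poll. The following stand-alone sketch reproduces that state with a generic queue consumer; the thread name and one-second timeout are illustrative assumptions, not Hadoop's actual CallQueueManager/Server$Handler code:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class IdleHandlerSketch {
        public static void main(String[] args) throws InterruptedException {
            LinkedBlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>();

            Thread handler = new Thread(() -> {
                try {
                    while (!Thread.currentThread().isInterrupted()) {
                        // On an empty queue the handler parks here, producing the
                        // LockSupport.parkNanos -> LinkedBlockingQueue.poll stack
                        // seen on the idle IPC handlers above.
                        Runnable call = callQueue.poll(1, TimeUnit.SECONDS);
                        if (call != null) {
                            call.run();
                        }
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }, "idle-handler-sketch");
            handler.start();

            Thread.sleep(100); // let the handler park on the empty queue
            System.out.println(handler.getName() + ": " + handler.getState()); // TIMED_WAITING
            handler.interrupt();
            handler.join();
        }
    }

Each timed-out poll shows up as one more increment of the thread's waited count, so an idle handler accumulates waited counts without ever blocking on a monitor.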
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 500 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1949933414-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1949933414-122-acceptor-0@5a81a771-ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:39435}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1949933414-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1949933414-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1538188378) connection to localhost/127.0.0.1:42173 from jenkins): State: TIMED_WAITING Blocked count: 1594 Waited count: 1594 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (Session-HouseKeeper-13c931a3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 0 Waited count: 2308 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6089a67d): State: TIMED_WAITING Blocked count: 0 Waited count: 997 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 46725): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (Command processor): State: WAITING Blocked count: 1 Waited count: 342 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e759b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 138 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1478 Waited count: 1641 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 139 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@23926a4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 500 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp626054808-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp626054808-157-acceptor-0@3d120c0b-ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:34795}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp626054808-158): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp626054808-159): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 161 (Session-HouseKeeper-445a8f77-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4)): State: TIMED_WAITING 
Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@629996a1): State: TIMED_WAITING Blocked count: 0 Waited count: 997 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (IPC Server idle connection scanner for port 37315): State: TIMED_WAITING Blocked count: 1 Waited count: 51 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 195 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 100 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (java.util.concurrent.ThreadPoolExecutor$Worker@f8f721d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@398453e7[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 3 Waited count: 373 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1525c6ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 
(BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1477 Waited count: 1648 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1daf733b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 192 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 501 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 499 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 501 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 500 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@9209bfc[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62516): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 51 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 250 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 426 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f77ea61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 
cport:62516):): State: WAITING Blocked count: 1 Waited count: 531 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@787bf1fc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 557 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@186e56f9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 247 (LeaseRenewer:jenkins@localhost:42173): State: TIMED_WAITING Blocked count: 14 Waited count: 519 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@6daeaec4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 424 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:62516)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 62 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20ce18a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 77 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e1524de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 98 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 
(NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@46d57956 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063): State: WAITING Blocked count: 147 Waited count: 558 Waiting on java.util.concurrent.Semaphore$NonfairSync@663c1eb6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063): State: WAITING Blocked count: 98 Waited count: 477 Waiting on java.util.concurrent.Semaphore$NonfairSync@366721ed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063): State: WAITING Blocked count: 98 Waited count: 8612 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41a3c02b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39063): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c510e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c510e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2a1e6c12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@257958f5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7a0476e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5e855227 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a2cb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 331 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;b66397bb9319:39063): State: TIMED_WAITING Blocked count: 12 Waited count: 4357 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1100/0x00007f081cfa0db8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 352 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (org.apache.hadoop.hdfs.PeerCache@299c68c9): State: TIMED_WAITING Blocked count: 0 Waited count: 166 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4940 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 391 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 40 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 160 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 50 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49312 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 43 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5973a6ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 
(regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57eebb9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@175b08d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63ef6dd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 487 (LeaseRenewer:jenkins.hfs.0@localhost:42173): State: TIMED_WAITING Blocked count: 14 Waited count: 519 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 492 
(LeaseRenewer:jenkins.hfs.1@localhost:42173): State: TIMED_WAITING Blocked count: 14 Waited count: 520 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 497 (LeaseRenewer:jenkins.hfs.2@localhost:42173): State: TIMED_WAITING Blocked count: 14 Waited count: 519 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 500 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 512 (region-location-0): State: WAITING Blocked count: 14 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 49149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 
(RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 633 Waiting on java.util.concurrent.ForkJoinPool@5ffae91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 545 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 305 Waiting on java.util.concurrent.ForkJoinPool@5ffae91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 569 (region-location-1): State: WAITING Blocked count: 4 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (region-location-2): State: WAITING Blocked count: 4 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 978 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1127 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1039 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1069 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 74 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@52b4064f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1085 
(MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1231 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1284 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1288 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1646 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@a9f749f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1879 (region-location-3): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1880 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2025 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2766 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 434 Waiting on java.util.concurrent.ForkJoinPool@5ffae91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5747 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 370 Waiting on java.util.concurrent.ForkJoinPool@5ffae91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5748 (ForkJoinPool.commonPool-worker-7): State: TIMED_WAITING Blocked count: 0 Waited count: 445 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6630 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6631 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11388 (AsyncFSWAL-1-hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData-prefix:b66397bb9319,39063,1733897427482): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72ecc87b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11390 (java.util.concurrent.ThreadPoolExecutor$Worker@469adba1[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11391 (java.util.concurrent.ThreadPoolExecutor$Worker@572f0e60[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11394 (java.util.concurrent.ThreadPoolExecutor$Worker@14110228[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11395 (java.util.concurrent.ThreadPoolExecutor$Worker@642a609b[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11398 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-11T06:18:56,299 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T06:19:26,299 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b66397bb9319:39063 232 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 48 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@30c28534 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@810e6d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5632 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 57 Waiting on java.util.concurrent.CountDownLatch$Sync@46fc5fcd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11835 Waited count: 12692 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 14 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@207a85f3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2fd3d943 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a743795): State: TIMED_WAITING Blocked count: 0 Waited count: 1121 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1874862952-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1874862952-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1874862952-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1874862952-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1874862952-41): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1874862952-42-acceptor-0@173365be-ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:34241}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1874862952-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1874862952-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4c8643e2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 41 Waited count: 3686 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c283e82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 42173): State: TIMED_WAITING Blocked count: 1 Waited 
count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3b857160): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 188 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@57b0ae63): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 6 Waited count: 188 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 55199 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1843 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e2a97bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 42173): State: TIMED_WAITING Blocked count: 80 Waited count: 2770 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 42173): State: TIMED_WAITING Blocked count: 78 Waited count: 2765 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 42173): State: TIMED_WAITING Blocked count: 74 Waited count: 2769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 42173): State: TIMED_WAITING Blocked count: 50 Waited count: 2781 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 42173): State: TIMED_WAITING Blocked count: 73 Waited count: 2775 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5a1288c7): State: TIMED_WAITING Blocked count: 0 Waited count: 280 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@237c9bcb): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2f91011b): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@595285c4): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(294475220)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp301817503-87): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp301817503-88-acceptor-0@7cc1b682-ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:46863}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp301817503-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp301817503-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-338e7c55-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@222c8e79): State: TIMED_WAITING Blocked count: 0 Waited count: 1118 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38485): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 2 Waited count: 358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f736061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1511 Waited count: 1688 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e58ecbb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 561 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1949933414-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1949933414-122-acceptor-0@5a81a771-ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:39435}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1949933414-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1949933414-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1538188378) connection to localhost/127.0.0.1:42173 from jenkins): State: TIMED_WAITING Blocked count: 1654 Waited count: 1654 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (Session-HouseKeeper-13c931a3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 0 Waited count: 2368 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6089a67d): State: TIMED_WAITING Blocked count: 0 Waited count: 1117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 46725): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (Command processor): State: WAITING Blocked count: 1 Waited count: 362 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e759b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 138 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1498 Waited count: 1681 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 139 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@23926a4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp626054808-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp626054808-157-acceptor-0@3d120c0b-ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:34795}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp626054808-158): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp626054808-159): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 161 (Session-HouseKeeper-445a8f77-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4)): State: TIMED_WAITING 
Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@629996a1): State: TIMED_WAITING Blocked count: 0 Waited count: 1117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (IPC Server idle connection scanner for port 37315): State: TIMED_WAITING Blocked count: 1 Waited count: 57 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 195 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (java.util.concurrent.ThreadPoolExecutor$Worker@f8f721d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@398453e7[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 3 Waited count: 393 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1525c6ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 
(BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1497 Waited count: 1688 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1daf733b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 192 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 561 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 559 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 561 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@9209bfc[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62516): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 280 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 431 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f77ea61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 
cport:62516):): State: WAITING Blocked count: 1 Waited count: 536 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@787bf1fc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 562 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@186e56f9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@6daeaec4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:62516)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 62 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20ce18a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 77 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e1524de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@46d57956 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063): State: WAITING Blocked count: 147 Waited count: 558 Waiting on java.util.concurrent.Semaphore$NonfairSync@663c1eb6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063): State: WAITING Blocked count: 98 Waited count: 477 Waiting on java.util.concurrent.Semaphore$NonfairSync@366721ed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063): State: WAITING Blocked count: 98 Waited count: 8612 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41a3c02b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39063): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c510e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c510e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2a1e6c12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@257958f5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7a0476e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5e855227 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a2cb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 331 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 46 
Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;b66397bb9319:39063): State: TIMED_WAITING Blocked count: 12 Waited count: 4357 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1100/0x00007f081cfa0db8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 352 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (org.apache.hadoop.hdfs.PeerCache@299c68c9): State: TIMED_WAITING Blocked count: 0 Waited count: 186 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5539 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 391 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 40 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@54129cfa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55314 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 43 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5973a6ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 38 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57eebb9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@175b08d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63ef6dd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 500 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 512 (region-location-0): State: WAITING Blocked count: 14 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55151 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 634 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 545 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 305 Waiting on java.util.concurrent.ForkJoinPool@5ffae91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 569 (region-location-1): State: WAITING Blocked count: 4 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (region-location-2): State: WAITING Blocked count: 4 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 978 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1133 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1039 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1069 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 74 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@52b4064f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1085 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1231 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1284 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1288 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1646 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@a9f749f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1879 (region-location-3): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1880 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2025 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2766 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 434 Waiting on java.util.concurrent.ForkJoinPool@5ffae91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5747 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 370 Waiting on java.util.concurrent.ForkJoinPool@5ffae91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6630 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6631 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11388 (AsyncFSWAL-1-hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData-prefix:b66397bb9319,39063,1733897427482): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72ecc87b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11398 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 12 
Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-11T06:19:56,299 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-11T06:20:26,299 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-11T06:20:28,843 DEBUG [master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=25, reuseRatio=71.43%
2024-12-11T06:20:28,844 DEBUG [master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-12-11T06:20:36,862 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b66397bb9319:39063
232 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)
Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)
Thread 3 (Finalizer): State: WAITING Blocked count: 48 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@30c28534 Stack:
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@810e6d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6232 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 63 Waiting on java.util.concurrent.CountDownLatch$Sync@5ac549ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11835 Waited count: 12693 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 14 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@207a85f3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2fd3d943 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a743795): State: TIMED_WAITING Blocked count: 0 Waited count: 1241 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 125 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1874862952-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1874862952-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1874862952-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1874862952-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1874862952-41): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1874862952-42-acceptor-0@173365be-ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:34241}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1874862952-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1874862952-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4c8643e2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 41 Waited count: 3686 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c283e82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 42173): State: TIMED_WAITING Blocked count: 1 Waited count: 63 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3b857160): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@57b0ae63): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 6 Waited count: 208 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 61163 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1843 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e2a97bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 42173): State: TIMED_WAITING Blocked count: 80 Waited count: 2832 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 42173): State: TIMED_WAITING Blocked count: 78 Waited count: 2826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 42173): State: TIMED_WAITING Blocked count: 74 Waited count: 2830 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 42173): State: TIMED_WAITING Blocked count: 50 Waited count: 2843 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 42173): State: TIMED_WAITING Blocked count: 73 Waited count: 2836 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): 
State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5a1288c7): State: TIMED_WAITING Blocked count: 0 Waited count: 310 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@237c9bcb): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2f91011b): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@595285c4): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(294475220)): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp301817503-87): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp301817503-88-acceptor-0@7cc1b682-ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:46863}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp301817503-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp301817503-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-338e7c55-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@222c8e79): State: TIMED_WAITING Blocked count: 0 Waited count: 1238 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38485): State: TIMED_WAITING Blocked count: 1 Waited count: 63 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 2 Waited count: 378 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f736061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1531 Waited count: 1728 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e58ecbb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 621 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 620 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1949933414-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1949933414-122-acceptor-0@5a81a771-ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:39435}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1949933414-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1949933414-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1538188378) connection to localhost/127.0.0.1:42173 from jenkins): State: TIMED_WAITING Blocked count: 1714 Waited count: 1714 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (Session-HouseKeeper-13c931a3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 0 Waited count: 2428 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6089a67d): State: TIMED_WAITING Blocked count: 0 Waited count: 1237 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 46725): State: TIMED_WAITING Blocked count: 1 Waited count: 63 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked 
count: 0 Waited count: 124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (Command processor): State: WAITING Blocked count: 1 Waited count: 382 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e759b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 138 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1518 Waited count: 1721 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 139 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@23926a4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 620 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp626054808-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp626054808-157-acceptor-0@3d120c0b-ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:34795}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp626054808-158): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp626054808-159): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 161 (Session-HouseKeeper-445a8f77-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@629996a1): State: TIMED_WAITING Blocked count: 0 Waited count: 1237 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (IPC Server idle connection scanner for port 37315): State: TIMED_WAITING Blocked count: 1 Waited count: 63 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 195 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@440dbd6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1930c9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 124 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (java.util.concurrent.ThreadPoolExecutor$Worker@f8f721d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@398453e7[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 3 Waited count: 413 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1525c6ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1517 Waited count: 1728 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1daf733b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 192 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 621 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 621 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 620 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1fdfe557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@9209bfc[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62516): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 310 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 435 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f77ea61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:62516):): State: WAITING Blocked count: 1 Waited count: 540 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@787bf1fc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 566 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@186e56f9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@6daeaec4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:62516)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 62 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20ce18a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 77 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e1524de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 100 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 
(NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@46d57956 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063): State: WAITING Blocked count: 147 Waited count: 558 Waiting on java.util.concurrent.Semaphore$NonfairSync@663c1eb6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063): State: WAITING Blocked count: 98 Waited count: 477 Waiting on java.util.concurrent.Semaphore$NonfairSync@366721ed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063): State: WAITING Blocked count: 98 Waited count: 8612 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41a3c02b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39063): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c510e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c510e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2a1e6c12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@257958f5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7a0476e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5e855227 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING 
Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a2cb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 331 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;b66397bb9319:39063): State: TIMED_WAITING Blocked count: 12 Waited count: 4357 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1100/0x00007f081cfa0db8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 352 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (org.apache.hadoop.hdfs.PeerCache@299c68c9): State: TIMED_WAITING Blocked count: 0 Waited count: 206 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6139 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 391 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 40 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@54129cfa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61315 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 43 Waited count: 3 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5973a6ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57eebb9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@175b08d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63ef6dd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 500 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 512 (region-location-0): State: WAITING Blocked count: 14 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 305 Waiting on java.util.concurrent.ForkJoinPool@5ffae91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 569 (region-location-1): State: WAITING Blocked count: 4 Waited count: 12 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (region-location-2): State: WAITING Blocked count: 4 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 978 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1039 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1069 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 74 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@52b4064f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 
(RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1085 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1231 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1284 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1288 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1646 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@a9f749f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1879 (region-location-3): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1880 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2025 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2766 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 434 Waiting on java.util.concurrent.ForkJoinPool@5ffae91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5747 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 371 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6630 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6631 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11388 (AsyncFSWAL-1-hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData-prefix:b66397bb9319,39063,1733897427482): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72ecc87b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11399 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11404 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-11T06:20:56,300 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-11T06:21:26,300 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b66397bb9319:39063
231 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 48 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@30c28534 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@810e6d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6831 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 69 Waiting on java.util.concurrent.CountDownLatch$Sync@5a9618e7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11835 Waited count: 12694 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 14 Waited count: 15 Waiting on 
java.lang.ref.ReferenceQueue$Lock@207a85f3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2fd3d943 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a743795): State: TIMED_WAITING Blocked count: 0 Waited count: 1361 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 137 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1874862952-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1874862952-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1874862952-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1874862952-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1874862952-41): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1874862952-42-acceptor-0@173365be-ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:34241}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1874862952-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1874862952-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4c8643e2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 41 Waited count: 3686 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c283e82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 
(Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 42173): State: TIMED_WAITING Blocked count: 1 Waited count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3b857160): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 228 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@57b0ae63): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 6 Waited count: 228 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 67128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1843 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e2a97bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 42173): State: TIMED_WAITING Blocked count: 80 Waited count: 2893 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 42173): State: TIMED_WAITING Blocked count: 78 Waited count: 2887 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 42173): State: TIMED_WAITING Blocked count: 74 Waited count: 2891 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 42173): State: TIMED_WAITING Blocked count: 50 Waited count: 2904 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 42173): State: TIMED_WAITING Blocked count: 73 Waited count: 2898 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5a1288c7): State: TIMED_WAITING Blocked count: 0 Waited count: 340 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@237c9bcb): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2f91011b): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@595285c4): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(294475220)): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp301817503-87): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp301817503-88-acceptor-0@7cc1b682-ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:46863}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp301817503-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp301817503-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-338e7c55-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@222c8e79): State: TIMED_WAITING Blocked count: 0 Waited count: 1358 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38485): State: TIMED_WAITING Blocked count: 1 Waited count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 2 Waited count: 398 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f736061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1551 Waited count: 1768 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e58ecbb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 681 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1949933414-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1949933414-122-acceptor-0@5a81a771-ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:39435}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1949933414-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1949933414-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1538188378) connection to localhost/127.0.0.1:42173 from jenkins): State: TIMED_WAITING Blocked count: 1774 Waited count: 1774 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (Session-HouseKeeper-13c931a3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 0 Waited count: 2488 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6089a67d): State: TIMED_WAITING Blocked count: 0 Waited count: 1357 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 46725): State: TIMED_WAITING Blocked count: 1 Waited count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (Command processor): State: WAITING Blocked count: 1 Waited count: 402 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e759b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 138 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1538 Waited count: 1761 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 139 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@23926a4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 690 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 687 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp626054808-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp626054808-157-acceptor-0@3d120c0b-ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:34795}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp626054808-158): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp626054808-159): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 161 (Session-HouseKeeper-445a8f77-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@629996a1): State: TIMED_WAITING Blocked count: 0 Waited count: 1357 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (IPC Server idle connection scanner for port 37315): State: TIMED_WAITING Blocked count: 1 Waited count: 69 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 195 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@440dbd6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1930c9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 136 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (java.util.concurrent.ThreadPoolExecutor$Worker@f8f721d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@398453e7[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 3 Waited count: 433 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1525c6ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1537 Waited count: 1768 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1daf733b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 192 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 681 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 679 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 681 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1fdfe557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@9209bfc[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62516): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 340 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 440 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f77ea61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:62516):): State: WAITING Blocked count: 1 Waited count: 545 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@787bf1fc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 571 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@186e56f9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@6daeaec4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 511 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:62516)): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 62 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20ce18a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 77 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e1524de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 100 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 
(NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@46d57956 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063): State: WAITING Blocked count: 147 Waited count: 558 Waiting on java.util.concurrent.Semaphore$NonfairSync@663c1eb6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063): State: WAITING Blocked count: 98 Waited count: 477 Waiting on java.util.concurrent.Semaphore$NonfairSync@366721ed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063): State: WAITING Blocked count: 98 Waited count: 8612 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41a3c02b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39063): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c510e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c510e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2a1e6c12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@257958f5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7a0476e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5e855227 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a2cb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 331 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;b66397bb9319:39063): State: TIMED_WAITING Blocked count: 12 Waited count: 4357 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1100/0x00007f081cfa0db8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 352 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (org.apache.hadoop.hdfs.PeerCache@299c68c9): State: TIMED_WAITING Blocked count: 0 Waited count: 226 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6739 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 391 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 40 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@54129cfa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67317 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 43 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5973a6ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57eebb9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@175b08d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63ef6dd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 500 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 512 (region-location-0): State: WAITING Blocked count: 14 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67154 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 305 Waiting on java.util.concurrent.ForkJoinPool@5ffae91 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 569 (region-location-1): State: WAITING Blocked count: 4 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (region-location-2): State: WAITING Blocked count: 4 Waited count: 
11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 978 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1145 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1039 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1069 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 74 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@52b4064f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1085 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1231 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1284 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1288 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1289 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1646 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@a9f749f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1879 (region-location-3): State: WAITING Blocked count: 4 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1880 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2025 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2766 
(ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 435 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6630 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6631 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11388 (AsyncFSWAL-1-hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData-prefix:b66397bb9319,39063,1733897427482): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72ecc87b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
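The records below show how this run actually failed: the sync for WAL txid=4654 never completed, so the memstore flush performed while closing the master:store region blocked for the full 300000 ms (which corresponds to the five-minute default of hbase.regionserver.wal.sync.timeout) and the close was abandoned with a WALSyncTimeoutIOException. The shape of that failure is a bounded wait on a sync future; the following is a minimal sketch of the wait-with-deadline pattern only, assuming a CompletableFuture stands in for HBase's SyncFuture and a plain IOException stands in for TimeoutIOException, not the actual AbstractFSWAL code.

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    // Sketch: block on a pending WAL sync up to a deadline and convert a timeout
    // into an IOException, roughly the behaviour reported below as
    // "Failed to get sync result after 300000 ms ... WAL system stuck?".
    public final class SyncWithTimeoutSketch {
      static long awaitSync(CompletableFuture<Long> pendingSync, long timeoutMs) throws IOException {
        try {
          return pendingSync.get(timeoutMs, TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
          // HBase raises TimeoutIOException/WALSyncTimeoutIOException here; a plain
          // IOException stands in for it in this illustration.
          throw new IOException("Failed to get sync result after " + timeoutMs + " ms", e);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new InterruptedIOException("Interrupted while waiting for sync");
        } catch (ExecutionException e) {
          throw new IOException("Sync failed", e.getCause());
        }
      }
    }

Once that timeout fires during close, the region is put online again ("Failed flush ... putting online again" in the close journal below) and the WAL writer is torn down, which triggers the follow-on errors that appear after it.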
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11399 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11404 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-11T06:21:56,300 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-11T06:22:26,300 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-11T06:22:43,998 DEBUG [M:0;b66397bb9319:39063 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733897863979Disabling compacts and flushes for region at 1733897863979Disabling writes for close at 1733897863996 (+17 ms)Obtaining lock to block concurrent updates at 1733897863997 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733897863997Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1032896, getHeapSize=1238144, getOffHeapSize=0, getCellsCount=2706 at 1733897863997Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1733898163998 (+300001 ms)
2024-12-11T06:22:43,998 WARN [M:0;b66397bb9319:39063 {}] region.MasterRegion(134): Failed to close region
org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4654, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4654, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?]
    ... 19 more
2024-12-11T06:22:44,000 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T06:22:44,002 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-11T06:22:44,002 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-11T06:22:44,002 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/WALs/b66397bb9319,39063,1733897427482/b66397bb9319%2C39063%2C1733897427482.1733897429504
2024-12-11T06:22:44,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/WALs/b66397bb9319,39063,1733897427482/b66397bb9319%2C39063%2C1733897427482.1733897429504 after 0ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T06:22:44,004 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T06:22:44,005 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/WALs/b66397bb9319,39063,1733897427482/b66397bb9319%2C39063%2C1733897427482.1733897429504
2024-12-11T06:22:44,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/WALs/b66397bb9319,39063,1733897427482/b66397bb9319%2C39063%2C1733897427482.1733897429504 after 0ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
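After the failed close, the Close-WAL-Writer thread above falls back to HDFS lease recovery on the WAL file, and both attempts fail with "java.io.IOException: Filesystem closed" because the mini-cluster's DFSClient has already been shut down; the retry loop is then cancelled. The underlying call, reached via reflection in RecoverLeaseFSUtils as the traces show, is DistributedFileSystem.recoverLease(Path), which returns true once the NameNode has released the lease. The following is only a sketch of that polling loop, assuming a fixed one-second pause rather than RecoverLeaseFSUtils' real pause schedule and cancellation checks.

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch: poll recoverLease() until the lease is released or a deadline passes.
    // Assumes the URI really is an HDFS filesystem; names here are illustrative only.
    public final class LeaseRecoverySketch {
      public static boolean recoverWalLease(URI hdfsUri, Path walFile, long deadlineMs)
          throws IOException, InterruptedException {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(hdfsUri, conf);
        long start = System.currentTimeMillis();
        // recoverLease() returns false while the lease is still held by the old writer.
        while (!dfs.recoverLease(walFile)) {
          if (System.currentTimeMillis() - start > deadlineMs) {
            return false; // give up; in the log above the attempt fails earlier because the FileSystem is closed
          }
          Thread.sleep(1000L); // fixed back-off for illustration
        }
        return true;
      }
    }

In this run the recovery could never succeed: by the time it ran, the shared test FileSystem had been closed, so every attempt threw before the lease could be inspected.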
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;b66397bb9319:39063 231 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 48 Waited count: 19 Waiting on java.lang.ref.ReferenceQueue$Lock@30c28534 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 0 Waited count: 40 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@810e6d1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 0 Waited count: 36 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7431 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 75 Waiting on java.util.concurrent.CountDownLatch$Sync@2aee9a0f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) 
app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11835 Waited count: 12695 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 14 Waited count: 15 Waiting on java.lang.ref.ReferenceQueue$Lock@207a85f3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@2fd3d943 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@a743795): State: TIMED_WAITING Blocked count: 0 Waited count: 1481 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 149 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1874862952-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 
(qtp1874862952-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1874862952-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1874862952-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1874862952-41): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1874862952-42-acceptor-0@173365be-ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:34241}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1874862952-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1874862952-44): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-4c8643e2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 41 Waited count: 3686 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c283e82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 42173): State: TIMED_WAITING Blocked count: 1 Waited count: 75 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3b857160): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 249 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@57b0ae63): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 6 Waited count: 248 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 73093 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 2 Waited count: 1843 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4e2a97bb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 42173): State: TIMED_WAITING Blocked count: 80 Waited count: 2954 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 42173): State: TIMED_WAITING Blocked count: 78 Waited count: 2949 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 42173): State: TIMED_WAITING Blocked count: 74 Waited count: 2953 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 42173): State: TIMED_WAITING Blocked count: 50 Waited count: 2965 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 42173): State: TIMED_WAITING Blocked count: 73 Waited count: 2959 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5a1288c7): State: TIMED_WAITING Blocked count: 0 Waited count: 370 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@237c9bcb): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2f91011b): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@595285c4): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(294475220)): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp301817503-87): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp301817503-88-acceptor-0@7cc1b682-ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:46863}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp301817503-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp301817503-90): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-338e7c55-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@222c8e79): State: TIMED_WAITING Blocked count: 0 Waited count: 1478 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 38485): State: TIMED_WAITING Blocked count: 1 Waited count: 75 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 2 Waited count: 418 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6f736061 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1571 Waited count: 1808 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@7e58ecbb): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 743 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 38485): State: TIMED_WAITING Blocked count: 0 Waited count: 744 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1949933414-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1949933414-122-acceptor-0@5a81a771-ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:39435}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1949933414-123): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1949933414-124): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Client (1538188378) connection to localhost/127.0.0.1:42173 from jenkins): State: TIMED_WAITING Blocked count: 1834 Waited count: 1834 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 125 (Session-HouseKeeper-13c931a3-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (IPC Parameter Sending Thread for localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 0 Waited count: 2548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 128 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6089a67d): State: TIMED_WAITING Blocked count: 0 Waited count: 1477 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 130 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 131 (IPC Server idle connection scanner for port 46725): State: TIMED_WAITING Blocked count: 1 Waited count: 75 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 133 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (Command processor): State: WAITING Blocked count: 1 Waited count: 422 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e759b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 138 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1558 Waited count: 1801 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 139 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@23926a4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 132 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 129 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 140 (IPC Server handler 0 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 752 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 1 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 759 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 2 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 3 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 46725): State: TIMED_WAITING Blocked count: 0 Waited count: 739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp626054808-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f081c42ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp626054808-157-acceptor-0@3d120c0b-ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:34795}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp626054808-158): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp626054808-159): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 161 (Session-HouseKeeper-445a8f77-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3)): State: 
TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 190 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@629996a1): State: TIMED_WAITING Blocked count: 0 Waited count: 1477 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 194 (IPC Server idle connection scanner for port 37315): State: TIMED_WAITING Blocked count: 1 Waited count: 75 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 195 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@440dbd6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1930c9b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (java.util.concurrent.ThreadPoolExecutor$Worker@f8f721d[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@398453e7[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 3 Waited count: 453 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1525c6ab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173): State: TIMED_WAITING Blocked count: 1557 Waited count: 1808 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@1daf733b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (IPC Server Responder): 
State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 192 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 739 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 741 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default 
port 37315): State: TIMED_WAITING Blocked count: 0 Waited count: 740 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6/current/BP-723867772-172.17.0.2-1733897423063): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1fdfe557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@9209bfc[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62516): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 370 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 9 Waited count: 444 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7f77ea61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:62516):): State: WAITING Blocked count: 1 Waited count: 549 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@787bf1fc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 575 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@186e56f9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@6daeaec4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 539 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 29 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:62516)): State: RUNNABLE 
Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 62 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20ce18a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 77 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5e1524de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dca6dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@46d57956 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39063): State: WAITING Blocked count: 147 Waited count: 558 Waiting on java.util.concurrent.Semaphore$NonfairSync@663c1eb6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39063): State: WAITING Blocked count: 98 Waited count: 477 Waiting on java.util.concurrent.Semaphore$NonfairSync@366721ed Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39063): State: WAITING Blocked count: 98 Waited count: 8612 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41a3c02b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39063): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c510e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2c510e80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2a1e6c12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@257958f5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39063): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@7a0476e9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39063): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5e855227 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7a2cb19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 331 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 46 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;b66397bb9319:39063): State: TIMED_WAITING Blocked count: 12 Waited count: 4358 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1415/0x00007f081d23b3e8.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) 
app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 74 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (master/b66397bb9319:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (org.apache.hadoop.hdfs.PeerCache@299c68c9): State: TIMED_WAITING Blocked count: 0 Waited count: 246 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 376 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 7338 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 391 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 40 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 75 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@54129cfa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 73319 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 43 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 30 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5973a6ee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57eebb9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@175b08d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/b66397bb9319:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63ef6dd8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 500 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 512 (region-location-0): State: WAITING Blocked count: 14 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 73155 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 524 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 526 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 535 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 545 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 306 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 569 (region-location-1): State: WAITING Blocked count: 4 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (region-location-2): State: WAITING Blocked count: 4 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 978 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1151 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1039 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1069 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 
74 Waited count: 120 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@52b4064f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1085 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1231 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1284 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1288 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1289 (RPCClient-NioEventLoopGroup-6-13):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1646 (Container metrics unregistration):
  State: WAITING
  Blocked count: 11
  Waited count: 70
  Waiting on java.util.TaskQueue@a9f749f
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 1879 (region-location-3):
  State: WAITING
  Blocked count: 4
  Waited count: 8
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1880 (region-location-4):
  State: WAITING
  Blocked count: 2
  Waited count: 5
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27eff39d
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 2025 (RPCClient-NioEventLoopGroup-6-14):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 6630 (RPCClient-NioEventLoopGroup-6-15):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 6631 (RPCClient-NioEventLoopGroup-6-16):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11388 (AsyncFSWAL-1-hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData-prefix:b66397bb9319,39063,1733897427482):
  State: WAITING
  Blocked count: 0
  Waited count: 2
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@72ecc87b
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11404 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 13
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 11407 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 11408 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1404/0x00007f081d233fe0.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-11T06:22:48,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/WALs/b66397bb9319,39063,1733897427482/b66397bb9319%2C39063%2C1733897427482.1733897429504 after 4000ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T06:22:49,000 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete. Please check the status of the underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-12-11T06:22:49,001 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-11T06:22:49,001 INFO [M:0;b66397bb9319:39063 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-11T06:22:49,001 INFO [M:0;b66397bb9319:39063 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39063
2024-12-11T06:22:49,001 INFO [M:0;b66397bb9319:39063 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-11T06:22:49,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42173/user/jenkins/test-data/71feb48e-9185-4521-6a26-c9cb44559b2b/MasterData/WALs/b66397bb9319,39063,1733897427482/b66397bb9319%2C39063%2C1733897427482.1733897429504
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-12-11T06:22:49,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-11T06:22:49,103 INFO [M:0;b66397bb9319:39063 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-11T06:22:49,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39063-0x1007fb2ceb60000, quorum=127.0.0.1:62516, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-11T06:22:49,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@68703158{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T06:22:49,107 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42021b37{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-11T06:22:49,107 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-11T06:22:49,107 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@bc523e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-11T06:22:49,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@693ee7fc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,STOPPED}
2024-12-11T06:22:49,109 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
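The wal.AbstractFSWAL ERROR above names the config key "hbase.wal.async.wait.on.shutdown.seconds" as the knob behind the 5-second wait on async WAL writer close. As a minimal, hypothetical sketch (not taken from this test's code): the key could be raised on the HBase Configuration before the mini cluster starts; the class name and the 30-second value below are illustrative assumptions, only the property name is quoted from the log message.

    // Hypothetical sketch: raise the WAL shutdown wait hinted at by the
    // AbstractFSWAL ERROR above. Only the property name comes from the log;
    // the 30-second value and where this runs (test setup) are assumptions.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The log shows a 5-second wait ("We have waited 5 seconds ...").
        conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
        System.out.println(conf.get("hbase.wal.async.wait.on.shutdown.seconds"));
      }
    }

The same key could equally be set in hbase-site.xml for a standing cluster; either way it only lengthens the wait on shutdown, it does not address the "Filesystem closed" cause seen in the traces above.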
2024-12-11T06:22:49,109 WARN [BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-11T06:22:49,109 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-11T06:22:49,109 WARN [BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-723867772-172.17.0.2-1733897423063 (Datanode Uuid 75e5eb78-af4b-40fd-a136-a194224b2e9f) service to localhost/127.0.0.1:42173
2024-12-11T06:22:49,111 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data5/current/BP-723867772-172.17.0.2-1733897423063 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T06:22:49,111 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data6/current/BP-723867772-172.17.0.2-1733897423063 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T06:22:49,112 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-11T06:22:49,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@213fabce{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T06:22:49,114 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4248c76a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-11T06:22:49,114 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-11T06:22:49,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca589bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-11T06:22:49,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f30d4b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,STOPPED}
2024-12-11T06:22:49,116 WARN [BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-11T06:22:49,116 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-11T06:22:49,116 WARN [BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-723867772-172.17.0.2-1733897423063 (Datanode Uuid a0cb6457-4909-410b-b27c-791da41291ae) service to localhost/127.0.0.1:42173
2024-12-11T06:22:49,116 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-11T06:22:49,116 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data3/current/BP-723867772-172.17.0.2-1733897423063 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T06:22:49,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data4/current/BP-723867772-172.17.0.2-1733897423063 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T06:22:49,117 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-11T06:22:49,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6b3e82e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T06:22:49,119 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69f92038{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-11T06:22:49,119 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-11T06:22:49,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27a35a11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-11T06:22:49,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@394d89b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,STOPPED}
2024-12-11T06:22:49,120 WARN [BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-11T06:22:49,120 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-11T06:22:49,120 WARN [BP-723867772-172.17.0.2-1733897423063 heartbeating to localhost/127.0.0.1:42173 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-723867772-172.17.0.2-1733897423063 (Datanode Uuid 72cee194-7e77-452d-85dc-f786cfd4c1a8) service to localhost/127.0.0.1:42173
2024-12-11T06:22:49,120 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-11T06:22:49,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data1/current/BP-723867772-172.17.0.2-1733897423063 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T06:22:49,121 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/cluster_6519a15e-3cfe-d667-ed0c-a174af661462/data/data2/current/BP-723867772-172.17.0.2-1733897423063 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T06:22:49,122 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-11T06:22:49,128 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44c957a2{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-11T06:22:49,128 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30f56608{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-11T06:22:49,129 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-11T06:22:49,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20e934eb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-11T06:22:49,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41878587{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/d9395aea-5747-dcf5-b2a2-6fc92df2b2df/hadoop.log.dir/,STOPPED}
2024-12-11T06:22:49,141 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-11T06:22:49,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down