2024-11-10 10:31:46,268 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a
2024-11-10 10:31:46,279 main DEBUG Took 0.008960 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-10 10:31:46,279 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-10 10:31:46,279 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-10 10:31:46,280 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-10 10:31:46,281 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,288 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-10 10:31:46,299 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,300 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,301 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,301 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,302 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,302 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,303 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,303 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,303 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,303 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,304 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,304 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,305 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,305 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,306 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,306 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,306 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,306 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,307 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,307 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,307 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,308 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,308 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,308 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-10 10:31:46,309 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,309 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-10 10:31:46,310 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-10 10:31:46,312 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-10 10:31:46,313 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-10 10:31:46,314 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
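[Editor's note: the createLoggers(...) entry above lists the per-package levels the test's log4j2.properties defines (e.g. org.apache.zookeeper at ERROR, org.apache.hadoop at WARN, org.apache.hadoop.hbase at DEBUG, root at INFO,Console). The test loads these from the properties file, but for readers who want to reproduce the same quieting in their own code, this is a minimal, hypothetical sketch of the programmatic equivalent using Log4j2's Configurator API; it is not part of the test.]

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public final class LogLevelsSketch {
  public static void main(String[] args) {
    // Same levels as a few of the loggers listed in createLoggers(...) above.
    Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
    Configurator.setLevel("org.apache.hadoop", Level.WARN);
    Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
    // Root logger is "INFO,Console" in the dump.
    Configurator.setRootLevel(Level.INFO);
  }
}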
2024-11-10 10:31:46,315 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-10 10:31:46,315 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-10 10:31:46,323 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-10 10:31:46,326 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-10 10:31:46,327 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-10 10:31:46,328 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-10 10:31:46,328 main DEBUG createAppenders(={Console})
2024-11-10 10:31:46,329 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a initialized
2024-11-10 10:31:46,329 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a
2024-11-10 10:31:46,329 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a OK.
2024-11-10 10:31:46,330 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-10 10:31:46,330 main DEBUG OutputStream closed
2024-11-10 10:31:46,330 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-10 10:31:46,330 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-10 10:31:46,330 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@4310d43 OK
2024-11-10 10:31:46,396 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-10 10:31:46,398 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-10 10:31:46,399 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-10 10:31:46,400 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-10 10:31:46,400 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-10 10:31:46,401 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-10 10:31:46,401 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-10 10:31:46,401 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-10 10:31:46,402 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-10 10:31:46,402 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-10 10:31:46,402 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-10 10:31:46,403 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-10 10:31:46,403 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-10 10:31:46,403 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-10 10:31:46,403 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-10 10:31:46,404 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-10 10:31:46,404 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-10 10:31:46,405 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-10 10:31:46,407 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-10 10:31:46,407 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@12f9af83) with optional ClassLoader: null
2024-11-10 10:31:46,407 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-10 10:31:46,408 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@12f9af83] started OK.
2024-11-10T10:31:46,685 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80
2024-11-10 10:31:46,687 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-10 10:31:46,688 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
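[Editor's note: the "Registering MBean" entries above show Log4j2 exposing its loggers and appenders over JMX under the org.apache.logging.log4j2 domain, keyed by the context id (type=1dbd16a6). A minimal sketch, not from the test itself, of listing those MBeans from inside the same JVM with the standard platform MBeanServer:]

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public final class ListLog4jMBeans {
  public static void main(String[] args) throws Exception {
    // Query every MBean in the Log4j2 domain; this would print the same
    // type=<context>,component=Loggers/Appenders names registered above.
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    for (ObjectName name : server.queryNames(new ObjectName("org.apache.logging.log4j2:*"), null)) {
      System.out.println(name);
    }
  }
}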
2024-11-10T10:31:46,697 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay timeout: 13 mins
2024-11-10T10:31:46,702 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplayValueCompression timeout: 13 mins
2024-11-10T10:31:46,723 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-10T10:31:46,769 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false
2024-11-10T10:31:46,769 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512
2024-11-10T10:31:46,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-10T10:31:46,805 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48, deleteOnExit=true
2024-11-10T10:31:46,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-10T10:31:46,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/test.cache.data in system properties and HBase conf
2024-11-10T10:31:46,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/hadoop.tmp.dir in system properties and HBase conf
2024-11-10T10:31:46,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/hadoop.log.dir in system properties and HBase conf
2024-11-10T10:31:46,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-10T10:31:46,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-10T10:31:46,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-10T10:31:46,893 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-10T10:31:46,992 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-10T10:31:46,996 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-10T10:31:46,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-10T10:31:46,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-10T10:31:46,998 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-10T10:31:46,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-10T10:31:46,999 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-10T10:31:47,000 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-10T10:31:47,001 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-10T10:31:47,001 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-10T10:31:47,002 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/nfs.dump.dir in system properties and HBase conf
2024-11-10T10:31:47,002 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/java.io.tmpdir in system properties and HBase conf
2024-11-10T10:31:47,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-10T10:31:47,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-10T10:31:47,004 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-10T10:31:47,863 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-10T10:31:47,948 INFO [Time-limited test {}] log.Log(170): Logging initialized @2410ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-10T10:31:48,031 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T10:31:48,099 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-10T10:31:48,131 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-10T10:31:48,131 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-10T10:31:48,132 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-10T10:31:48,152 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T10:31:48,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/hadoop.log.dir/,AVAILABLE}
2024-11-10T10:31:48,156 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-10T10:31:48,365 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3717288f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/java.io.tmpdir/jetty-localhost-43815-hadoop-hdfs-3_4_1-tests_jar-_-any-14381505552948150402/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-10T10:31:48,371 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:43815}
2024-11-10T10:31:48,372 INFO [Time-limited test {}] server.Server(415): Started @2833ms
2024-11-10T10:31:48,771 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T10:31:48,777 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-10T10:31:48,778 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-10T10:31:48,778 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-10T10:31:48,779 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-10T10:31:48,779 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cf5a85e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/hadoop.log.dir/,AVAILABLE}
2024-11-10T10:31:48,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a359997{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-10T10:31:48,902 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@330740de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/java.io.tmpdir/jetty-localhost-35791-hadoop-hdfs-3_4_1-tests_jar-_-any-16029793786604764324/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T10:31:48,903 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:35791}
2024-11-10T10:31:48,903 INFO [Time-limited test {}] server.Server(415): Started @3365ms
2024-11-10T10:31:48,954 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-10T10:31:49,086 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T10:31:49,093 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-10T10:31:49,094 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-10T10:31:49,094 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-10T10:31:49,094 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-10T10:31:49,095 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46b092e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/hadoop.log.dir/,AVAILABLE}
2024-11-10T10:31:49,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cc2d6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-10T10:31:49,243 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7bd427b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/java.io.tmpdir/jetty-localhost-32819-hadoop-hdfs-3_4_1-tests_jar-_-any-9731545810963317697/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T10:31:49,244 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:32819}
2024-11-10T10:31:49,245 INFO [Time-limited test {}] server.Server(415): Started @3706ms
2024-11-10T10:31:49,247 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-10T10:31:49,290 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-10T10:31:49,296 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-10T10:31:49,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-10T10:31:49,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-10T10:31:49,300 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-10T10:31:49,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a6d5e13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/hadoop.log.dir/,AVAILABLE}
2024-11-10T10:31:49,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f9972d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-10T10:31:49,457 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35f1150e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/java.io.tmpdir/jetty-localhost-35917-hadoop-hdfs-3_4_1-tests_jar-_-any-14115820630958462954/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-10T10:31:49,458 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:35917}
2024-11-10T10:31:49,458 INFO [Time-limited test {}] server.Server(415): Started @3920ms
2024-11-10T10:31:49,461 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
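[Editor's note: the StartMiniClusterOption entry near the top of this block (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1) is what drives all of the DFS, ZooKeeper, and Jetty startup that follows. A hedged sketch of launching the same topology, assuming the HBase 3.x test API (HBaseTestingUtil and StartMiniClusterOption); this is not the test's actual source:]

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public final class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Matches the option logged above: 1 master, 3 region servers,
    // 3 HDFS datanodes, and a single MiniZooKeeperCluster node.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);
    try {
      // ... exercise the cluster, e.g. via util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}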
2024-11-10T10:31:49,480 WARN [Thread-109 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data1/current/BP-1220159422-172.17.0.2-1731234707608/current, will proceed with Du for space computation calculation,
2024-11-10T10:31:49,480 WARN [Thread-112 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data4/current/BP-1220159422-172.17.0.2-1731234707608/current, will proceed with Du for space computation calculation,
2024-11-10T10:31:49,480 WARN [Thread-111 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data3/current/BP-1220159422-172.17.0.2-1731234707608/current, will proceed with Du for space computation calculation,
2024-11-10T10:31:49,480 WARN [Thread-110 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data2/current/BP-1220159422-172.17.0.2-1731234707608/current, will proceed with Du for space computation calculation,
2024-11-10T10:31:49,568 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-10T10:31:49,580 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-10T10:31:49,639 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data5/current/BP-1220159422-172.17.0.2-1731234707608/current, will proceed with Du for space computation calculation,
2024-11-10T10:31:49,641 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data6/current/BP-1220159422-172.17.0.2-1731234707608/current, will proceed with Du for space computation calculation,
2024-11-10T10:31:49,661 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xac43b41fd2d87ee1 with lease ID 0x690de26813f99c6f: Processing first storage report for DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d from datanode DatanodeRegistration(127.0.0.1:43217, datanodeUuid=a673c7d4-541a-41f0-b75d-c5de82e0aa25, infoPort=38697, infoSecurePort=0, ipcPort=41313, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608)
2024-11-10T10:31:49,663 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac43b41fd2d87ee1 with lease ID 0x690de26813f99c6f: from storage DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d node DatanodeRegistration(127.0.0.1:43217, datanodeUuid=a673c7d4-541a-41f0-b75d-c5de82e0aa25, infoPort=38697, infoSecurePort=0, ipcPort=41313, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-10T10:31:49,663 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7737e8af983ac5d2 with lease ID 0x690de26813f99c70: Processing first storage report for DS-318b6bed-b662-4120-839e-ab74179655d9 from datanode DatanodeRegistration(127.0.0.1:44251, datanodeUuid=acfd5911-0a59-446d-be88-432542f502a2, infoPort=41983, infoSecurePort=0, ipcPort=42563, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608)
2024-11-10T10:31:49,664 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7737e8af983ac5d2 with lease ID 0x690de26813f99c70: from storage DS-318b6bed-b662-4120-839e-ab74179655d9 node DatanodeRegistration(127.0.0.1:44251, datanodeUuid=acfd5911-0a59-446d-be88-432542f502a2, infoPort=41983, infoSecurePort=0, ipcPort=42563, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-10T10:31:49,664 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xac43b41fd2d87ee1 with lease ID 0x690de26813f99c6f: Processing first storage report for DS-cc5d70d9-1016-46ce-95c0-1fb98310b597 from datanode DatanodeRegistration(127.0.0.1:43217, datanodeUuid=a673c7d4-541a-41f0-b75d-c5de82e0aa25, infoPort=38697, infoSecurePort=0, ipcPort=41313, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608)
2024-11-10T10:31:49,664 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xac43b41fd2d87ee1 with lease ID 0x690de26813f99c6f: from storage DS-cc5d70d9-1016-46ce-95c0-1fb98310b597 node DatanodeRegistration(127.0.0.1:43217, datanodeUuid=a673c7d4-541a-41f0-b75d-c5de82e0aa25, infoPort=38697, infoSecurePort=0, ipcPort=41313, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-10T10:31:49,665 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7737e8af983ac5d2 with lease ID 0x690de26813f99c70: Processing first storage report for DS-063748f8-e370-4dcb-af07-9fe869d196bb from datanode DatanodeRegistration(127.0.0.1:44251, datanodeUuid=acfd5911-0a59-446d-be88-432542f502a2, infoPort=41983, infoSecurePort=0, ipcPort=42563, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608)
2024-11-10T10:31:49,665 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7737e8af983ac5d2 with lease ID 0x690de26813f99c70: from storage DS-063748f8-e370-4dcb-af07-9fe869d196bb node DatanodeRegistration(127.0.0.1:44251, datanodeUuid=acfd5911-0a59-446d-be88-432542f502a2, infoPort=41983, infoSecurePort=0, ipcPort=42563, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-10T10:31:49,673 WARN [Thread-117 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-10T10:31:49,680 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c0ac184dba37b22 with lease ID 0x690de26813f99c71: Processing first storage report for DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa from datanode DatanodeRegistration(127.0.0.1:44741, datanodeUuid=25d71697-ac4a-4844-ae61-7e4a50f38157, infoPort=37657, infoSecurePort=0, ipcPort=44109, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608)
2024-11-10T10:31:49,680 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c0ac184dba37b22 with lease ID 0x690de26813f99c71: from storage DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa node DatanodeRegistration(127.0.0.1:44741, datanodeUuid=25d71697-ac4a-4844-ae61-7e4a50f38157, infoPort=37657, infoSecurePort=0, ipcPort=44109, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-10T10:31:49,680 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2c0ac184dba37b22 with lease ID 0x690de26813f99c71: Processing first storage report for DS-2a20973a-d243-480b-bdd6-b5fba616b9c7 from datanode DatanodeRegistration(127.0.0.1:44741, datanodeUuid=25d71697-ac4a-4844-ae61-7e4a50f38157, infoPort=37657, infoSecurePort=0, ipcPort=44109, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608)
2024-11-10T10:31:49,681 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2c0ac184dba37b22 with lease ID 0x690de26813f99c71: from storage DS-2a20973a-d243-480b-bdd6-b5fba616b9c7 node DatanodeRegistration(127.0.0.1:44741, datanodeUuid=25d71697-ac4a-4844-ae61-7e4a50f38157, infoPort=37657, infoSecurePort=0, ipcPort=44109, storageInfo=lv=-57;cid=testClusterID;nsid=2146690655;c=1731234707608), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-10T10:31:49,902 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80
2024-11-10T10:31:49,975 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/zookeeper_0, clientPort=57026, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-10T10:31:49,985 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57026
2024-11-10T10:31:49,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-10T10:31:49,998 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-10T10:31:50,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741825_1001 (size=7)
2024-11-10T10:31:50,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741825_1001 (size=7)
2024-11-10T10:31:50,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741825_1001 (size=7)
2024-11-10T10:31:50,634 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf with version=8
2024-11-10T10:31:50,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/hbase-staging
2024-11-10T10:31:50,954 INFO [Time-limited test {}] client.ConnectionUtils(128): master/eccb6f743e07:0 server-side Connection retries=45
2024-11-10T10:31:50,965 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:50,966 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:50,970 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-10T10:31:50,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:50,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-10T10:31:51,107 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-10T10:31:51,165 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-10T10:31:51,174 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-10T10:31:51,178 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-10T10:31:51,205 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 4895 (auto-detected)
2024-11-10T10:31:51,206 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-10T10:31:51,225 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38215
2024-11-10T10:31:51,245 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38215 connecting to ZooKeeper ensemble=127.0.0.1:57026
2024-11-10T10:31:51,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:382150x0, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-10T10:31:51,280 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38215-0x101016f2c550000 connected
2024-11-10T10:31:51,304 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-10T10:31:51,306 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-10T10:31:51,315 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-10T10:31:51,319 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf, hbase.cluster.distributed=false
2024-11-10T10:31:51,341 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-10T10:31:51,345 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38215
2024-11-10T10:31:51,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38215
2024-11-10T10:31:51,346 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38215
2024-11-10T10:31:51,347 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38215
2024-11-10T10:31:51,347 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38215
2024-11-10T10:31:51,468 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/eccb6f743e07:0 server-side Connection retries=45
2024-11-10T10:31:51,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:51,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:51,470 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-10T10:31:51,470 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:51,471 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-10T10:31:51,474 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-10T10:31:51,476 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-10T10:31:51,477 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39345
2024-11-10T10:31:51,479 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39345 connecting to ZooKeeper ensemble=127.0.0.1:57026
2024-11-10T10:31:51,480 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-10T10:31:51,484 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-10T10:31:51,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:393450x0, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-10T10:31:51,492 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39345-0x101016f2c550001 connected
2024-11-10T10:31:51,493 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-10T10:31:51,497 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-10T10:31:51,504 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-10T10:31:51,506 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-10T10:31:51,511 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-10T10:31:51,512 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39345
2024-11-10T10:31:51,512 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39345
2024-11-10T10:31:51,516 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39345
2024-11-10T10:31:51,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39345
2024-11-10T10:31:51,517 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39345
2024-11-10T10:31:51,534 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/eccb6f743e07:0 server-side Connection retries=45
2024-11-10T10:31:51,535 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:51,535 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:51,535 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-10T10:31:51,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:51,536 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-10T10:31:51,536 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-10T10:31:51,536 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-10T10:31:51,537 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44191
2024-11-10T10:31:51,539 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44191 connecting to ZooKeeper ensemble=127.0.0.1:57026
2024-11-10T10:31:51,540 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-10T10:31:51,543 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
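[Editor's note: each server process above logs "RecoverableZooKeeper ... connecting to ZooKeeper ensemble=127.0.0.1:57026" followed by a SyncConnected event from ZKWatcher. RecoverableZooKeeper is HBase's retrying wrapper around the stock ZooKeeper client; this is a minimal, hypothetical sketch of the underlying connection handshake using the plain ZooKeeper API, not HBase's actual code:]

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class ZkConnectSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Connect to the mini cluster's ensemble from the log and wait for
    // SyncConnected, the same event the ZKWatcher entries above record.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57026", 30000, (WatchedEvent e) -> {
      if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    // baseZNode in the log is /hbase; its children include /hbase/master,
    // /hbase/running, /hbase/backup-masters, etc.
    System.out.println("children of /hbase: " + zk.getChildren("/hbase", false));
    zk.close();
  }
}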
2024-11-10T10:31:51,549 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:441910x0, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-10T10:31:51,550 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44191-0x101016f2c550002 connected
2024-11-10T10:31:51,550 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-10T10:31:51,550 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-10T10:31:51,552 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-10T10:31:51,554 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-10T10:31:51,556 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-10T10:31:51,556 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44191
2024-11-10T10:31:51,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44191
2024-11-10T10:31:51,560 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44191
2024-11-10T10:31:51,561 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44191
2024-11-10T10:31:51,562 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44191
2024-11-10T10:31:51,582 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/eccb6f743e07:0 server-side Connection retries=45
2024-11-10T10:31:51,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:51,582 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:51,582 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-10T10:31:51,583 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-10T10:31:51,583 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-10T10:31:51,583 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-10T10:31:51,583 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-10T10:31:51,584 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39867
2024-11-10T10:31:51,587 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39867 connecting to ZooKeeper ensemble=127.0.0.1:57026
2024-11-10T10:31:51,588 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-10T10:31:51,591 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-10T10:31:51,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:398670x0, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-10T10:31:51,598 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:398670x0, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-10T10:31:51,598 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39867-0x101016f2c550003 connected
2024-11-10T10:31:51,598 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-10T10:31:51,599 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-10T10:31:51,600 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-10T10:31:51,602 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-10T10:31:51,602 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39867
2024-11-10T10:31:51,603 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39867
2024-11-10T10:31:51,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39867
2024-11-10T10:31:51,608 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39867
2024-11-10T10:31:51,608 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39867
2024-11-10T10:31:51,624 DEBUG [M:0;eccb6f743e07:38215 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;eccb6f743e07:38215
2024-11-10T10:31:51,625 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/eccb6f743e07,38215,1731234710743
2024-11-10T10:31:51,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-10T10:31:51,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-10T10:31:51,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-10T10:31:51,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-10T10:31:51,634 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/eccb6f743e07,38215,1731234710743
2024-11-10T10:31:51,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-10T10:31:51,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:51,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-10T10:31:51,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:51,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:51,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-10T10:31:51,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:51,660 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-10T10:31:51,661 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/eccb6f743e07,38215,1731234710743 from backup master directory
2024-11-10T10:31:51,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/eccb6f743e07,38215,1731234710743
2024-11-10T10:31:51,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-10T10:31:51,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-10T10:31:51,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-10T10:31:51,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-10T10:31:51,665 WARN [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-10T10:31:51,665 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=eccb6f743e07,38215,1731234710743
2024-11-10T10:31:51,667 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-10T10:31:51,669 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-10T10:31:51,735 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/hbase.id] with ID: 4ca9019e-9f51-4a88-9744-47822550e251
2024-11-10T10:31:51,735 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/.tmp/hbase.id
2024-11-10T10:31:51,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741826_1002 (size=42)
2024-11-10T10:31:51,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741826_1002 (size=42)
2024-11-10T10:31:51,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741826_1002 (size=42)
2024-11-10T10:31:51,758 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/.tmp/hbase.id]:[hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/hbase.id]
2024-11-10T10:31:51,804 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-10T10:31:51,809 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-10T10:31:51,830 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 2024-11-10T10:31:51,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T10:31:51,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T10:31:51,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T10:31:51,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T10:31:51,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741827_1003 (size=196) 2024-11-10T10:31:51,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741827_1003 (size=196) 2024-11-10T10:31:51,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741827_1003 (size=196) 2024-11-10T10:31:51,873 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-10T10:31:51,875 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, 
flushPerChanges=1000000, flushIntervalMs=900000 2024-11-10T10:31:51,881 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:31:51,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741828_1004 (size=1189) 2024-11-10T10:31:51,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741828_1004 (size=1189) 2024-11-10T10:31:51,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741828_1004 (size=1189) 2024-11-10T10:31:51,935 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store 2024-11-10T10:31:51,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741829_1005 (size=34) 2024-11-10T10:31:51,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741829_1005 (size=34) 2024-11-10T10:31:51,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741829_1005 (size=34) 2024-11-10T10:31:51,965 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-10T10:31:51,969 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:31:51,971 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-10T10:31:51,971 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T10:31:51,971 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T10:31:51,973 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-10T10:31:51,973 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T10:31:51,973 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T10:31:51,975 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731234711970Disabling compacts and flushes for region at 1731234711970Disabling writes for close at 1731234711973 (+3 ms)Writing region close event to WAL at 1731234711973Closed at 1731234711973
2024-11-10T10:31:51,977 WARN [master/eccb6f743e07:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/.initializing
2024-11-10T10:31:51,978 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/WALs/eccb6f743e07,38215,1731234710743
2024-11-10T10:31:51,987 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-10T10:31:52,002 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=eccb6f743e07%2C38215%2C1731234710743, suffix=, logDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/WALs/eccb6f743e07,38215,1731234710743, archiveDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/oldWALs, maxLogs=10
2024-11-10T10:31:52,038 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/WALs/eccb6f743e07,38215,1731234710743/eccb6f743e07%2C38215%2C1731234710743.1731234712008, exclude list is [], retry=0
2024-11-10T10:31:52,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:31:52,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:31:52,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:31:52,065 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:31:52,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-11-10T10:31:52,112 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/WALs/eccb6f743e07,38215,1731234710743/eccb6f743e07%2C38215%2C1731234710743.1731234712008
2024-11-10T10:31:52,113 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:41983:41983)]
2024-11-10T10:31:52,113 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-11-10T10:31:52,114 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:31:52,117 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,118 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,157 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-11-10T10:31:52,193 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:52,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-10T10:31:52,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,200 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-11-10T10:31:52,200 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:52,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:31:52,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,204 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-11-10T10:31:52,204 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:52,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:31:52,205 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,208 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-11-10T10:31:52,208 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:52,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:31:52,209 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,213 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,214 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,219 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,219 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,223 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-10T10:31:52,226 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-11-10T10:31:52,231 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-10T10:31:52,233 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72081933, jitterRate=0.07410450279712677}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-10T10:31:52,239 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731234712131Initializing all the Stores at 1731234712133 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731234712134 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234712134Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234712135 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234712135Cleaning up temporary data from old regions at 1731234712220 (+85 ms)Region opened successfully at 1731234712239 (+19 ms)
2024-11-10T10:31:52,240 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-11-10T10:31:52,278 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a7f6440, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=eccb6f743e07/172.17.0.2:0
2024-11-10T10:31:52,310 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-10T10:31:52,322 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-10T10:31:52,323 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-10T10:31:52,326 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
2024-11-10T10:31:52,327 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec
2024-11-10T10:31:52,333 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec
2024-11-10T10:31:52,333 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-10T10:31:52,361 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-10T10:31:52,370 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-10T10:31:52,372 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-10T10:31:52,375 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-10T10:31:52,377 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-10T10:31:52,378 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-10T10:31:52,380 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-10T10:31:52,384 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-10T10:31:52,386 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-10T10:31:52,387 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-10T10:31:52,390 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-10T10:31:52,409 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-10T10:31:52,411 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-10T10:31:52,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-10T10:31:52,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-10T10:31:52,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-10T10:31:52,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-10T10:31:52,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,418 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=eccb6f743e07,38215,1731234710743, sessionid=0x101016f2c550000, setting cluster-up flag (Was=false)
2024-11-10T10:31:52,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,439 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-10T10:31:52,442 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=eccb6f743e07,38215,1731234710743
2024-11-10T10:31:52,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:31:52,455 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-10T10:31:52,457 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=eccb6f743e07,38215,1731234710743
2024-11-10T10:31:52,464 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-11-10T10:31:52,512 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(746): ClusterId : 4ca9019e-9f51-4a88-9744-47822550e251
2024-11-10T10:31:52,512 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(746): ClusterId : 4ca9019e-9f51-4a88-9744-47822550e251
2024-11-10T10:31:52,514 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(746): ClusterId : 4ca9019e-9f51-4a88-9744-47822550e251
2024-11-10T10:31:52,516 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-10T10:31:52,518 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-10T10:31:52,518 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-10T10:31:52,523 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-10T10:31:52,523 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-10T10:31:52,523 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-10T10:31:52,523 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-10T10:31:52,523 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-10T10:31:52,523 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-10T10:31:52,527 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-10T10:31:52,527 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-10T10:31:52,527 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-10T10:31:52,527 DEBUG [RS:1;eccb6f743e07:44191 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d8f7bed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=eccb6f743e07/172.17.0.2:0
2024-11-10T10:31:52,527 DEBUG [RS:0;eccb6f743e07:39345 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@101e8bf8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=eccb6f743e07/172.17.0.2:0
2024-11-10T10:31:52,527 DEBUG [RS:2;eccb6f743e07:39867 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26377779, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=eccb6f743e07/172.17.0.2:0
2024-11-10T10:31:52,544 DEBUG [RS:0;eccb6f743e07:39345 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;eccb6f743e07:39345
2024-11-10T10:31:52,544 DEBUG [RS:2;eccb6f743e07:39867 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;eccb6f743e07:39867
2024-11-10T10:31:52,547 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-10T10:31:52,547 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-10T10:31:52,547 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-10T10:31:52,547 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-10T10:31:52,547 DEBUG [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-10T10:31:52,547 DEBUG [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-10T10:31:52,547 DEBUG [RS:1;eccb6f743e07:44191 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;eccb6f743e07:44191
2024-11-10T10:31:52,548 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-10T10:31:52,548 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-10T10:31:52,548 DEBUG [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-10T10:31:52,552 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(2659): reportForDuty to master=eccb6f743e07,38215,1731234710743 with port=39867, startcode=1731234711581
2024-11-10T10:31:52,552 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(2659): reportForDuty to master=eccb6f743e07,38215,1731234710743 with port=44191, startcode=1731234711534
2024-11-10T10:31:52,552 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(2659): reportForDuty to master=eccb6f743e07,38215,1731234710743 with port=39345, startcode=1731234711428
2024-11-10T10:31:52,565 DEBUG [RS:2;eccb6f743e07:39867 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-10T10:31:52,565 DEBUG [RS:0;eccb6f743e07:39345 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-10T10:31:52,565 DEBUG [RS:1;eccb6f743e07:44191 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-10T10:31:52,566 INFO [AsyncFSWAL-0-hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData-prefix:eccb6f743e07,38215,1731234710743 {}] compress.Compression(560): Loaded codec org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec for compression algorithm GZ
2024-11-10T10:31:52,584 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-10T10:31:52,598 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-10T10:31:52,609 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-10T10:31:52,609 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49945, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-11-10T10:31:52,609 INFO [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59997, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-11-10T10:31:52,609 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53345, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-11-10T10:31:52,617 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38215 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-10T10:31:52,617 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: eccb6f743e07,38215,1731234710743
Number of backup masters: 0
Number of live region servers: 0
Number of dead region servers: 0
Number of unknown region servers: 0
Average load: 0.0
Number of requests: 0
Number of regions: 0
Number of regions in transition: 0
2024-11-10T10:31:52,624 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38215 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-10T10:31:52,625 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/eccb6f743e07:0, corePoolSize=5, maxPoolSize=5
2024-11-10T10:31:52,625 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38215 {}] ipc.MetricsHBaseServer(152): Unknown exception type
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-10T10:31:52,626 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/eccb6f743e07:0, corePoolSize=5, maxPoolSize=5
2024-11-10T10:31:52,626 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/eccb6f743e07:0, corePoolSize=5, maxPoolSize=5
2024-11-10T10:31:52,626 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/eccb6f743e07:0, corePoolSize=5, maxPoolSize=5
2024-11-10T10:31:52,626 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/eccb6f743e07:0, corePoolSize=10, maxPoolSize=10
2024-11-10T10:31:52,626 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,627 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/eccb6f743e07:0, corePoolSize=2, maxPoolSize=2
2024-11-10T10:31:52,627 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,632 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731234742632
2024-11-10T10:31:52,634 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-10T10:31:52,636 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-10T10:31:52,638 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-10T10:31:52,638 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-10T10:31:52,640 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-10T10:31:52,641 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-10T10:31:52,641 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-10T10:31:52,641 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-10T10:31:52,642 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,645 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-10T10:31:52,646 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-10T10:31:52,647 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:52,647 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-10T10:31:52,647 INFO [PEWorker-3 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0',
BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-10T10:31:52,652 DEBUG [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-10T10:31:52,652 DEBUG [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-10T10:31:52,652 DEBUG [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-10T10:31:52,652 WARN [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-10T10:31:52,652 WARN [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-10T10:31:52,652 WARN [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-10T10:31:52,656 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-10T10:31:52,656 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-10T10:31:52,660 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/eccb6f743e07:0:becomeActiveMaster-HFileCleaner.large.0-1731234712658,5,FailOnTimeoutGroup] 2024-11-10T10:31:52,662 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/eccb6f743e07:0:becomeActiveMaster-HFileCleaner.small.0-1731234712660,5,FailOnTimeoutGroup] 2024-11-10T10:31:52,662 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-10T10:31:52,662 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-10T10:31:52,664 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-10T10:31:52,664 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
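The repeated ServerNotRunningYetException stacks above are the expected startup race: each region server calls regionServerStartup() on the master before the master's services are fully up, receives the exception, and the reportForDuty path sleeps 100 ms and retries (the WARN lines). A minimal sketch of that retry pattern follows; the names reportForDuty, MAX_ATTEMPTS and SLEEP_MS are hypothetical, and this is an illustration of the pattern, not HBase's actual implementation (which retries until the server is stopped):

    import java.util.concurrent.TimeUnit;

    public final class ReportForDutyRetry {
        private static final int MAX_ATTEMPTS = 50; // hypothetical bound
        private static final long SLEEP_MS = 100;   // mirrors "sleeping 100 ms and then retrying"

        /** Retries a startup RPC that may fail while the remote service is still booting. */
        static void reportForDuty(Runnable startupRpc) throws InterruptedException {
            for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
                try {
                    startupRpc.run();   // e.g. the regionServerStartup RPC
                    return;             // registered successfully
                } catch (RuntimeException serverNotRunningYet) {
                    TimeUnit.MILLISECONDS.sleep(SLEEP_MS);
                }
            }
            throw new IllegalStateException("master never came up");
        }

        public static void main(String[] args) throws InterruptedException {
            int[] calls = {0};
            // Simulated master that only accepts the third attempt.
            reportForDuty(() -> {
                if (++calls[0] < 3) throw new RuntimeException("Server is not running yet");
            });
            System.out.println("registered after " + calls[0] + " attempts");
        }
    }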
2024-11-10T10:31:52,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741831_1007 (size=1321)
2024-11-10T10:31:52,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741831_1007 (size=1321)
2024-11-10T10:31:52,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741831_1007 (size=1321)
2024-11-10T10:31:52,708 INFO [PEWorker-3 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-10T10:31:52,708 INFO [PEWorker-3 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf
2024-11-10T10:31:52,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741832_1008 (size=32)
2024-11-10T10:31:52,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741832_1008 (size=32)
2024-11-10T10:31:52,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741832_1008 (size=32)
2024-11-10T10:31:52,725 DEBUG [PEWorker-3 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:31:52,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-10T10:31:52,731 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-10T10:31:52,731 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:52,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-10T10:31:52,733 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-10T10:31:52,735 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-10T10:31:52,736 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:52,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-10T10:31:52,737 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-10T10:31:52,739 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-10T10:31:52,739 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:52,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-10T10:31:52,740 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-10T10:31:52,743 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-10T10:31:52,743 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:52,744 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-10T10:31:52,744 DEBUG [PEWorker-3 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-10T10:31:52,751 DEBUG [PEWorker-3 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740
2024-11-10T10:31:52,753 DEBUG [PEWorker-3 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740
2024-11-10T10:31:52,754 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(2659): reportForDuty to master=eccb6f743e07,38215,1731234710743 with port=44191, startcode=1731234711534
2024-11-10T10:31:52,754 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(2659): reportForDuty to master=eccb6f743e07,38215,1731234710743 with port=39345, startcode=1731234711428
2024-11-10T10:31:52,756 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38215 {}] master.ServerManager(363): Checking decommissioned status of RegionServer eccb6f743e07,39345,1731234711428
2024-11-10T10:31:52,758 DEBUG [PEWorker-3 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-10T10:31:52,758 DEBUG [PEWorker-3 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-10T10:31:52,760 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38215 {}] master.ServerManager(517): Registering regionserver=eccb6f743e07,39345,1731234711428
2024-11-10T10:31:52,760 DEBUG [PEWorker-3 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-10T10:31:52,763 DEBUG [PEWorker-3 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-10T10:31:52,769 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(2659): reportForDuty to master=eccb6f743e07,38215,1731234710743 with port=39867, startcode=1731234711581
2024-11-10T10:31:52,774 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-10T10:31:52,775 INFO [PEWorker-3 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59158200, jitterRate=-0.11847412586212158}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-10T10:31:52,778 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38215 {}] master.ServerManager(363): Checking decommissioned status of RegionServer eccb6f743e07,44191,1731234711534
2024-11-10T10:31:52,778 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38215 {}] master.ServerManager(517): Registering regionserver=eccb6f743e07,44191,1731234711534
2024-11-10T10:31:52,779 DEBUG [PEWorker-3 {}] regionserver.HRegion(1006): Region open journal for 1588230740:
Writing region info on filesystem at 1731234712725
Initializing all the Stores at 1731234712727 (+2 ms)
Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731234712727
Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731234712727
Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234712727
Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731234712727
Cleaning up temporary data from old regions at 1731234712758 (+31 ms)
Region opened successfully at 1731234712779 (+21 ms)
2024-11-10T10:31:52,779 DEBUG [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf
2024-11-10T10:31:52,779 DEBUG [PEWorker-3 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-10T10:31:52,779 DEBUG [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39939
2024-11-10T10:31:52,779 INFO [PEWorker-3 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-10T10:31:52,779 DEBUG [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-10T10:31:52,779 DEBUG [PEWorker-3 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-10T10:31:52,779 DEBUG [PEWorker-3 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-10T10:31:52,779 DEBUG [PEWorker-3 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-10T10:31:52,784 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38215 {}] master.ServerManager(363): Checking decommissioned status of RegionServer eccb6f743e07,39867,1731234711581
2024-11-10T10:31:52,784 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38215 {}] master.ServerManager(517): Registering regionserver=eccb6f743e07,39867,1731234711581
2024-11-10T10:31:52,785 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-10T10:31:52,785 DEBUG [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf
2024-11-10T10:31:52,786 DEBUG [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39939
2024-11-10T10:31:52,786 DEBUG [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-10T10:31:52,787 DEBUG [RS:0;eccb6f743e07:39345 {}] zookeeper.ZKUtil(111): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/eccb6f743e07,39345,1731234711428
2024-11-10T10:31:52,788 WARN [RS:0;eccb6f743e07:39345 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
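The FSTableDescriptors and HRegion records above spell out the hbase:meta schema: four column families, ROWCOL bloom filters, ROW_INDEX_V1 block encoding, in-memory caching, and 8 KB blocks (64 KB for rep_barrier). For comparison, the sketch below shows roughly how those same properties are expressed for an ordinary table through HBase's public client API; the meta descriptor itself is built internally, so this is only an illustration, and the table name "demo" is hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetaLikeDescriptor {
        public static TableDescriptor build() {
            // Same knobs the log shows for the 'info' family of hbase:meta.
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                   // VERSIONS => '3'
                .setInMemory(true)                                   // IN_MEMORY => 'true'
                .setBlocksize(8192)                                  // BLOCKSIZE => 8 KB
                .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
                .build();
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))               // hypothetical table name
                .setColumnFamily(info)
                .build();
        }
    }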
2024-11-10T10:31:52,788 INFO [RS:0;eccb6f743e07:39345 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-10T10:31:52,788 DEBUG [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428
2024-11-10T10:31:52,789 INFO [PEWorker-3 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-10T10:31:52,790 DEBUG [PEWorker-3 {}] regionserver.HRegion(1676): Region close journal for 1588230740:
Waiting for close lock at 1731234712779
Disabling compacts and flushes for region at 1731234712779
Disabling writes for close at 1731234712779
Writing region close event to WAL at 1731234712789 (+10 ms)
Closed at 1731234712789
2024-11-10T10:31:52,792 DEBUG [RS:1;eccb6f743e07:44191 {}] zookeeper.ZKUtil(111): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/eccb6f743e07,44191,1731234711534
2024-11-10T10:31:52,792 WARN [RS:1;eccb6f743e07:44191 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-10T10:31:52,792 INFO [RS:1;eccb6f743e07:44191 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-10T10:31:52,793 DEBUG [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,44191,1731234711534
2024-11-10T10:31:52,795 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [eccb6f743e07,39345,1731234711428]
2024-11-10T10:31:52,795 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [eccb6f743e07,44191,1731234711534]
2024-11-10T10:31:52,795 DEBUG [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf
2024-11-10T10:31:52,795 DEBUG [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39939
2024-11-10T10:31:52,795 DEBUG [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-10T10:31:52,796 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-10T10:31:52,796 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-10T10:31:52,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-10T10:31:52,802 DEBUG [RS:2;eccb6f743e07:39867 {}] zookeeper.ZKUtil(111): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/eccb6f743e07,39867,1731234711581
2024-11-10T10:31:52,803 WARN [RS:2;eccb6f743e07:39867 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
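The "Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider" lines reflect the WAL provider selection, which is configuration-driven. A hedged sketch of the relevant keys follows; the values mirror what this run logs (asyncfs provider, and the "maxLogs=32" seen in the later WAL configuration lines), but key names and defaults should be verified against your HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalProviderConfig {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.wal.provider", "asyncfs");     // selects AsyncFSWALProvider, as logged
            conf.setInt("hbase.regionserver.maxlogs", 32); // matches "maxLogs=32" in the WAL config line
            return conf;
        }
    }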
2024-11-10T10:31:52,804 INFO [RS:2;eccb6f743e07:39867 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-10T10:31:52,804 DEBUG [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39867,1731234711581
2024-11-10T10:31:52,804 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-10T10:31:52,807 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [eccb6f743e07,39867,1731234711581]
2024-11-10T10:31:52,818 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-10T10:31:52,823 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-10T10:31:52,833 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-10T10:31:52,833 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-10T10:31:52,833 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-10T10:31:52,848 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-10T10:31:52,852 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-10T10:31:52,861 INFO [RS:0;eccb6f743e07:39345 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-10T10:31:52,861 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,864 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-10T10:31:52,869 INFO [RS:1;eccb6f743e07:44191 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-10T10:31:52,869 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,869 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-10T10:31:52,869 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-10T10:31:52,870 INFO [RS:2;eccb6f743e07:39867 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-10T10:31:52,870 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,870 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-10T10:31:52,876 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-10T10:31:52,876 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-10T10:31:52,877 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-10T10:31:52,878 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,878 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,878 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
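The PressureAwareCompactionThroughputController lines (higher bound 100 MB/s, lower bound 50 MB/s, tuning period 60000 ms) report configurable limits on compaction I/O. A sketch of setting them, assuming the standard key names for this controller; the values simply mirror the defaults logged above, and the keys should be checked against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionThroughputConfig {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // 100 MB/s upper and 50 MB/s lower bound, as reported at startup.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            // Re-tune once a minute ("tuning period: 60000 ms").
            conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);
            return conf;
        }
    }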
2024-11-10T10:31:52,878 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,878 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,878 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0, corePoolSize=2, maxPoolSize=2
2024-11-10T10:31:52,879 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0, corePoolSize=2, maxPoolSize=2
2024-11-10T10:31:52,879 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,879 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0, corePoolSize=2, maxPoolSize=2
2024-11-10T10:31:52,880 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/eccb6f743e07:0, corePoolSize=3, maxPoolSize=3
2024-11-10T10:31:52,880 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,880 DEBUG [RS:1;eccb6f743e07:44191 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/eccb6f743e07:0, corePoolSize=3, maxPoolSize=3
2024-11-10T10:31:52,880 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,881 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/eccb6f743e07:0, corePoolSize=3, maxPoolSize=3
2024-11-10T10:31:52,881 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/eccb6f743e07:0, corePoolSize=1, maxPoolSize=1
2024-11-10T10:31:52,881 DEBUG [RS:0;eccb6f743e07:39345 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/eccb6f743e07:0, corePoolSize=3, maxPoolSize=3
2024-11-10T10:31:52,881 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/eccb6f743e07:0, corePoolSize=3, maxPoolSize=3
2024-11-10T10:31:52,881 DEBUG [RS:2;eccb6f743e07:39867 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/eccb6f743e07:0, corePoolSize=3, maxPoolSize=3
2024-11-10T10:31:52,889 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,889 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,889 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,889 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,889 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,889 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,44191,1731234711534-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-10T10:31:52,892 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,892 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,893 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,893 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,893 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,893 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,39345,1731234711428-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-10T10:31:52,896 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,896 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,897 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,897 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,897 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,897 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,39867,1731234711581-MobFileCleanerChore, period=86400, unit=SECONDS is enabled.
2024-11-10T10:31:52,921 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-10T10:31:52,921 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-10T10:31:52,923 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,39345,1731234711428-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,923 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,44191,1731234711534-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,924 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false
2024-11-10T10:31:52,924 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,39867,1731234711581-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,924 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,924 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,924 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.Replication(171): eccb6f743e07,39345,1731234711428 started
2024-11-10T10:31:52,924 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.Replication(171): eccb6f743e07,44191,1731234711534 started
2024-11-10T10:31:52,924 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,925 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.Replication(171): eccb6f743e07,39867,1731234711581 started
2024-11-10T10:31:52,948 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
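Every "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." line above is a periodic task registered with the server's ChoreService. A minimal sketch of defining and scheduling one with the public ScheduledChore/ChoreService classes follows; the chore name, period, and Stoppable wiring here are hypothetical, and in HBase the server itself usually plays the Stoppable role:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public final class ChoreDemo {
        public static void main(String[] args) throws InterruptedException {
            // Minimal Stoppable; the chore stops running once this reports stopped.
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped = false;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            ChoreService service = new ChoreService("demo"); // thread-name prefix
            // Period is in milliseconds, like the chores logged above.
            ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
                @Override protected void chore() {
                    System.out.println("periodic work");
                }
            };
            service.scheduleChore(chore); // HBase logs "... is enabled." at this point
            Thread.sleep(3500);
            stopper.stop("done");
            service.shutdown();
        }
    }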
2024-11-10T10:31:52,948 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(1482): Serving as eccb6f743e07,39345,1731234711428, RpcServer on eccb6f743e07/172.17.0.2:39345, sessionid=0x101016f2c550001
2024-11-10T10:31:52,949 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-10T10:31:52,949 DEBUG [RS:0;eccb6f743e07:39345 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager eccb6f743e07,39345,1731234711428
2024-11-10T10:31:52,950 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'eccb6f743e07,39345,1731234711428'
2024-11-10T10:31:52,950 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-10T10:31:52,951 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-10T10:31:52,951 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,951 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(1482): Serving as eccb6f743e07,44191,1731234711534, RpcServer on eccb6f743e07/172.17.0.2:44191, sessionid=0x101016f2c550002
2024-11-10T10:31:52,951 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled.
2024-11-10T10:31:52,951 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(1482): Serving as eccb6f743e07,39867,1731234711581, RpcServer on eccb6f743e07/172.17.0.2:39867, sessionid=0x101016f2c550003
2024-11-10T10:31:52,951 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-10T10:31:52,952 DEBUG [RS:1;eccb6f743e07:44191 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager eccb6f743e07,44191,1731234711534
2024-11-10T10:31:52,952 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-10T10:31:52,952 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-10T10:31:52,952 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'eccb6f743e07,44191,1731234711534'
2024-11-10T10:31:52,952 DEBUG [RS:0;eccb6f743e07:39345 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager eccb6f743e07,39345,1731234711428
2024-11-10T10:31:52,952 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-10T10:31:52,952 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'eccb6f743e07,39345,1731234711428'
2024-11-10T10:31:52,952 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-10T10:31:52,952 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting
2024-11-10T10:31:52,952 DEBUG [RS:2;eccb6f743e07:39867 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager eccb6f743e07,39867,1731234711581
2024-11-10T10:31:52,952 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'eccb6f743e07,39867,1731234711581'
2024-11-10T10:31:52,952 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort'
2024-11-10T10:31:52,953 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-10T10:31:52,953 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-10T10:31:52,954 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-10T10:31:52,954 DEBUG [RS:0;eccb6f743e07:39345 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-10T10:31:52,954 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-10T10:31:52,954 INFO [RS:0;eccb6f743e07:39345 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-10T10:31:52,954 DEBUG [RS:1;eccb6f743e07:44191 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager eccb6f743e07,44191,1731234711534
2024-11-10T10:31:52,954 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'eccb6f743e07,44191,1731234711534'
2024-11-10T10:31:52,954 INFO [RS:0;eccb6f743e07:39345 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-10T10:31:52,954 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-10T10:31:52,955 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-10T10:31:52,956 DEBUG [RS:1;eccb6f743e07:44191 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-10T10:31:52,956 INFO [RS:1;eccb6f743e07:44191 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-10T10:31:52,956 INFO [RS:1;eccb6f743e07:44191 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
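The "Quota support disabled" lines mean quotas were never turned on for this test cluster, so the RPC and space quota managers skip initialization. Enabling them is a single boolean flag; a hedged sketch, assuming the standard hbase.quota.enabled key:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class QuotaConfig {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Without this, RegionServerRpcQuotaManager logs "Quota support disabled".
            conf.setBoolean("hbase.quota.enabled", true);
            return conf;
        }
    }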
2024-11-10T10:31:52,956 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired'
2024-11-10T10:31:52,957 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started
2024-11-10T10:31:52,957 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting
2024-11-10T10:31:52,957 DEBUG [RS:2;eccb6f743e07:39867 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager eccb6f743e07,39867,1731234711581
2024-11-10T10:31:52,957 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'eccb6f743e07,39867,1731234711581'
2024-11-10T10:31:52,957 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort'
2024-11-10T10:31:52,958 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired'
2024-11-10T10:31:52,959 DEBUG [RS:2;eccb6f743e07:39867 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started
2024-11-10T10:31:52,959 INFO [RS:2;eccb6f743e07:39867 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled
2024-11-10T10:31:52,959 INFO [RS:2;eccb6f743e07:39867 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager.
2024-11-10T10:31:52,974 WARN [eccb6f743e07:38215 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions.
2024-11-10T10:31:53,059 INFO [RS:0;eccb6f743e07:39345 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-10T10:31:53,059 INFO [RS:2;eccb6f743e07:39867 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-10T10:31:53,059 INFO [RS:1;eccb6f743e07:44191 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-10T10:31:53,063 INFO [RS:1;eccb6f743e07:44191 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=eccb6f743e07%2C44191%2C1731234711534, suffix=, logDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,44191,1731234711534, archiveDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/oldWALs, maxLogs=32
2024-11-10T10:31:53,063 INFO [RS:2;eccb6f743e07:39867 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=eccb6f743e07%2C39867%2C1731234711581, suffix=, logDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39867,1731234711581, archiveDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/oldWALs, maxLogs=32
2024-11-10T10:31:53,063 INFO [RS:0;eccb6f743e07:39345 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=eccb6f743e07%2C39345%2C1731234711428, suffix=, logDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428, archiveDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/oldWALs, maxLogs=32
2024-11-10T10:31:53,083 DEBUG [RS:0;eccb6f743e07:39345 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428/eccb6f743e07%2C39345%2C1731234711428.1731234713068, exclude list is [], retry=0
2024-11-10T10:31:53,083 DEBUG [RS:1;eccb6f743e07:44191 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,44191,1731234711534/eccb6f743e07%2C44191%2C1731234711534.1731234713068, exclude list is [], retry=0
2024-11-10T10:31:53,083 DEBUG [RS:2;eccb6f743e07:39867 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39867,1731234711581/eccb6f743e07%2C39867%2C1731234711581.1731234713068, exclude list is [], retry=0
2024-11-10T10:31:53,088 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:31:53,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:31:53,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:31:53,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:31:53,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:31:53,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:31:53,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:31:53,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:31:53,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:31:53,099 INFO [RS:1;eccb6f743e07:44191 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,44191,1731234711534/eccb6f743e07%2C44191%2C1731234711534.1731234713068
2024-11-10T10:31:53,099 DEBUG [RS:1;eccb6f743e07:44191 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657)]
2024-11-10T10:31:53,106 INFO [RS:2;eccb6f743e07:39867 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39867,1731234711581/eccb6f743e07%2C39867%2C1731234711581.1731234713068
2024-11-10T10:31:53,106 DEBUG [RS:2;eccb6f743e07:39867 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:41983:41983)]
2024-11-10T10:31:53,107 INFO [RS:0;eccb6f743e07:39345 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428/eccb6f743e07%2C39345%2C1731234711428.1731234713068
2024-11-10T10:31:53,107 DEBUG [RS:0;eccb6f743e07:39345 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983)]
2024-11-10T10:31:53,227 DEBUG [eccb6f743e07:38215 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3
2024-11-10T10:31:53,236 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(204): Hosts are {eccb6f743e07=0} racks are {/default-rack=0}
2024-11-10T10:31:53,244 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(303): server 0 has 0 regions
2024-11-10T10:31:53,244 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(303): server 1 has 0 regions
2024-11-10T10:31:53,244 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(303): server 2 has 0 regions
2024-11-10T10:31:53,244 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(310): server 0 is on host 0
2024-11-10T10:31:53,244 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(310): server 1 is on host 0
2024-11-10T10:31:53,244 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(310): server 2 is on host 0
2024-11-10T10:31:53,244 INFO [eccb6f743e07:38215 {}] balancer.BalancerClusterState(321): server 0 is on rack 0
2024-11-10T10:31:53,244 INFO [eccb6f743e07:38215 {}] balancer.BalancerClusterState(321): server 1 is on rack 0
2024-11-10T10:31:53,244 INFO [eccb6f743e07:38215 {}] balancer.BalancerClusterState(321): server 2 is on rack 0
2024-11-10T10:31:53,244 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1
2024-11-10T10:31:53,253 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=eccb6f743e07,39867,1731234711581
2024-11-10T10:31:53,258 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as eccb6f743e07,39867,1731234711581, state=OPENING
2024-11-10T10:31:53,263 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create
it 2024-11-10T10:31:53,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T10:31:53,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T10:31:53,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T10:31:53,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T10:31:53,267 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T10:31:53,267 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T10:31:53,267 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T10:31:53,267 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T10:31:53,269 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-10T10:31:53,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=eccb6f743e07,39867,1731234711581}] 2024-11-10T10:31:53,447 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T10:31:53,450 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52125, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T10:31:53,463 INFO [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-10T10:31:53,463 INFO [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:31:53,464 INFO [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-10T10:31:53,468 INFO [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=eccb6f743e07%2C39867%2C1731234711581.meta, suffix=.meta, logDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39867,1731234711581, 
archiveDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/oldWALs, maxLogs=32 2024-11-10T10:31:53,485 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39867,1731234711581/eccb6f743e07%2C39867%2C1731234711581.meta.1731234713470.meta, exclude list is [], retry=0 2024-11-10T10:31:53,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:31:53,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:31:53,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:31:53,493 INFO [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39867,1731234711581/eccb6f743e07%2C39867%2C1731234711581.meta.1731234713470.meta 2024-11-10T10:31:53,493 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:41983:41983)] 2024-11-10T10:31:53,493 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:31:53,495 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-10T10:31:53,498 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-10T10:31:53,503 INFO [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
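The meta WAL just created goes through AsyncFSWALProvider, and later entries in this log report hasValueCompression=true, valueCompressionType=GZ for the WAL reader. Below is a minimal configuration sketch, assuming the keys from HBASE-20952 (asyncfs provider) and HBASE-25869 (WAL value compression); verify the exact key names against the HBase version actually under test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalCompressionConfigSketch {
      public static Configuration walConf() {
        Configuration conf = HBaseConfiguration.create();
        // Select the async fan-out WAL implementation seen in this log.
        conf.set("hbase.wal.provider", "asyncfs");
        // WAL key/tag compression (key assumed from HConstants.ENABLE_WAL_COMPRESSION).
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);
        // WAL value compression; the reader context below prints the enum name GZ,
        // while the config value is assumed to be the lower-case algorithm name.
        conf.setBoolean("hbase.regionserver.wal.value.compression.enabled", true);
        conf.set("hbase.regionserver.wal.value.compression.type", "gz");
        return conf;
      }
    }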
2024-11-10T10:31:53,508 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-10T10:31:53,508 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:31:53,508 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-10T10:31:53,508 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-10T10:31:53,512 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-10T10:31:53,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-10T10:31:53,513 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:31:53,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T10:31:53,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-10T10:31:53,515 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-10T10:31:53,516 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:31:53,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T10:31:53,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-10T10:31:53,518 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-10T10:31:53,518 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:31:53,519 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-10T10:31:53,519 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-10T10:31:53,520 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-10T10:31:53,520 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:31:53,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
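Each HStore above is opened from a column family descriptor; the region open journal that follows echoes the attributes (DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', and so on). A sketch of building an equivalent family with the public client API; this only mirrors the attributes printed here, not the full version-dependent hbase:meta schema:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeFamilySketch {
      public static ColumnFamilyDescriptor infoFamily() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
            .build();
      }
    }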
2024-11-10T10:31:53,521 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-10T10:31:53,523 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740 2024-11-10T10:31:53,526 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740 2024-11-10T10:31:53,529 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-10T10:31:53,529 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-10T10:31:53,531 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-10T10:31:53,534 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-10T10:31:53,536 INFO [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60563352, jitterRate=-0.09753572940826416}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-10T10:31:53,536 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-10T10:31:53,538 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731234713509Writing region info on filesystem at 1731234713509Initializing all the Stores at 1731234713511 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731234713511Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731234713511Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234713511Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731234713511Cleaning up temporary data from old regions at 1731234713530 (+19 ms)Running coprocessor post-open hooks at 1731234713536 (+6 ms)Region opened successfully at 1731234713538 (+2 ms) 2024-11-10T10:31:53,546 INFO [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731234713438 2024-11-10T10:31:53,560 DEBUG [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-10T10:31:53,560 INFO [RS_OPEN_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-10T10:31:53,563 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=eccb6f743e07,39867,1731234711581 2024-11-10T10:31:53,565 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as eccb6f743e07,39867,1731234711581, state=OPEN 2024-11-10T10:31:53,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T10:31:53,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T10:31:53,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T10:31:53,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-10T10:31:53,569 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T10:31:53,569 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T10:31:53,569 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T10:31:53,569 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=eccb6f743e07,39867,1731234711581 2024-11-10T10:31:53,569 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-10T10:31:53,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-10T10:31:53,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=eccb6f743e07,39867,1731234711581 in 299 msec 2024-11-10T10:31:53,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-10T10:31:53,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 773 msec 2024-11-10T10:31:53,585 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-10T10:31:53,585 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-10T10:31:53,610 DEBUG [PEWorker-1 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T10:31:53,611 DEBUG [PEWorker-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=eccb6f743e07,39867,1731234711581, seqNum=-1] 2024-11-10T10:31:53,641 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T10:31:53,643 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46901, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T10:31:53,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1650 sec 2024-11-10T10:31:53,666 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731234713665, completionTime=-1 2024-11-10T10:31:53,669 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-10T10:31:53,669 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
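InitMetaProcedure has just created the default and hbase namespaces, and the PEWorker fetched the meta region location from the connection registry. The same lookups are available to any client through the public API; a sketch using stock client calls, assuming the connection points at the minicluster above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Client-side equivalent of "The fetched meta region location is ...".
          HRegionLocation loc = meta.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("hbase:meta is on " + loc.getServerName());
          // The two namespaces InitMetaProcedure just created.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
        }
      }
    }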
2024-11-10T10:31:53,697 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-10T10:31:53,697 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731234773697 2024-11-10T10:31:53,697 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731234833697 2024-11-10T10:31:53,697 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 28 msec 2024-11-10T10:31:53,699 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-10T10:31:53,707 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,38215,1731234710743-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T10:31:53,707 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,38215,1731234710743-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T10:31:53,708 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,38215,1731234710743-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T10:31:53,709 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-eccb6f743e07:38215, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T10:31:53,710 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-10T10:31:53,710 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-10T10:31:53,717 DEBUG [master/eccb6f743e07:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-10T10:31:53,740 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.075sec 2024-11-10T10:31:53,741 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-10T10:31:53,743 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-10T10:31:53,744 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-10T10:31:53,745 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
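The ChoreService lines above and below register the master's periodic chores (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on) with fixed periods. A custom chore follows the same pattern; a minimal sketch against the public ScheduledChore/ChoreService classes, with a made-up name and the 60000 ms period of the ClusterStatusChore:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("sketch");
        // chore() runs every 60000 ms until the stopper stops or the service shuts down.
        service.scheduleChore(new ScheduledChore("sketch-chore", stopper, 60000) {
          @Override protected void chore() {
            System.out.println("periodic work");
          }
        });
        Thread.sleep(120000);
        service.shutdown();
      }
    }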
2024-11-10T10:31:53,745 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-10T10:31:53,745 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,38215,1731234710743-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-10T10:31:53,746 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,38215,1731234710743-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-10T10:31:53,751 DEBUG [master/eccb6f743e07:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-10T10:31:53,752 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-10T10:31:53,753 INFO [master/eccb6f743e07:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=eccb6f743e07,38215,1731234710743-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-10T10:31:53,823 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e4c2dcd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T10:31:53,824 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request eccb6f743e07,38215,-1 for getting cluster id 2024-11-10T10:31:53,827 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-10T10:31:53,837 DEBUG [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4ca9019e-9f51-4a88-9744-47822550e251' 2024-11-10T10:31:53,839 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-10T10:31:53,839 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4ca9019e-9f51-4a88-9744-47822550e251" 2024-11-10T10:31:53,842 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b382efb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T10:31:53,842 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [eccb6f743e07,38215,-1] 2024-11-10T10:31:53,845 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-10T10:31:53,847 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T10:31:53,848 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36176, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-10T10:31:53,850 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fea663b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-10T10:31:53,851 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-10T10:31:53,858 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=eccb6f743e07,39867,1731234711581, seqNum=-1] 2024-11-10T10:31:53,858 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T10:31:53,861 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37184, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T10:31:53,906 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=eccb6f743e07,38215,1731234710743 2024-11-10T10:31:53,906 INFO [Time-limited test {}] wal.AbstractTestWALReplay(147): hbase.rootdir=hdfs://localhost:39939/hbase 2024-11-10T10:31:53,926 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=363, OpenFileDescriptor=611, MaxFileDescriptor=1048576, SystemLoadAverage=237, ProcessCount=11, AvailableMemoryMB=9192 2024-11-10T10:31:53,956 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:31:53,959 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:31:53,960 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T10:31:53,965 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-13205276, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/hregion-13205276, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:31:53,982 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/hregion-13205276/hregion-13205276.1731234713967, exclude list is [], retry=0 2024-11-10T10:31:53,987 DEBUG [AsyncFSWAL-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:31:53,987 DEBUG [AsyncFSWAL-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:31:53,988 DEBUG [AsyncFSWAL-8-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:31:53,991 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-13205276/hregion-13205276.1731234713967 2024-11-10T10:31:53,991 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983)] 2024-11-10T10:31:53,991 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => c64e6e2dacd55e453a9b6f205172cbdb, NAME => 'testReplayEditsWrittenIntoWAL,,1731234713957.c64e6e2dacd55e453a9b6f205172cbdb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39939/hbase 2024-11-10T10:31:54,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741838_1014 (size=64) 2024-11-10T10:31:54,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741838_1014 (size=64) 2024-11-10T10:31:54,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741838_1014 (size=64) 2024-11-10T10:31:54,005 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731234713957.c64e6e2dacd55e453a9b6f205172cbdb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:31:54,007 INFO [StoreOpener-c64e6e2dacd55e453a9b6f205172cbdb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region c64e6e2dacd55e453a9b6f205172cbdb 2024-11-10T10:31:54,010 INFO [StoreOpener-c64e6e2dacd55e453a9b6f205172cbdb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c64e6e2dacd55e453a9b6f205172cbdb columnFamilyName a 2024-11-10T10:31:54,010 DEBUG [StoreOpener-c64e6e2dacd55e453a9b6f205172cbdb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:31:54,010 INFO [StoreOpener-c64e6e2dacd55e453a9b6f205172cbdb-1 {}] regionserver.HStore(327): Store=c64e6e2dacd55e453a9b6f205172cbdb/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:31:54,011 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for c64e6e2dacd55e453a9b6f205172cbdb 2024-11-10T10:31:54,012 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb 2024-11-10T10:31:54,013 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb 2024-11-10T10:31:54,013 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for c64e6e2dacd55e453a9b6f205172cbdb 2024-11-10T10:31:54,013 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for c64e6e2dacd55e453a9b6f205172cbdb 2024-11-10T10:31:54,017 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for c64e6e2dacd55e453a9b6f205172cbdb 2024-11-10T10:31:54,021 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T10:31:54,022 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened c64e6e2dacd55e453a9b6f205172cbdb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61487127, jitterRate=-0.08377040922641754}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T10:31:54,023 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for c64e6e2dacd55e453a9b6f205172cbdb: Writing region info on filesystem at 1731234714005Initializing all the Stores at 1731234714007 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234714007Cleaning up temporary data from old regions at 1731234714013 (+6 ms)Region opened successfully at 1731234714023 (+10 ms) 2024-11-10T10:31:54,023 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing c64e6e2dacd55e453a9b6f205172cbdb, disabling compactions & flushes 2024-11-10T10:31:54,023 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1731234713957.c64e6e2dacd55e453a9b6f205172cbdb. 2024-11-10T10:31:54,023 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731234713957.c64e6e2dacd55e453a9b6f205172cbdb. 2024-11-10T10:31:54,023 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731234713957.c64e6e2dacd55e453a9b6f205172cbdb. after waiting 0 ms 2024-11-10T10:31:54,023 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731234713957.c64e6e2dacd55e453a9b6f205172cbdb. 2024-11-10T10:31:54,024 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731234713957.c64e6e2dacd55e453a9b6f205172cbdb. 
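The split that begins below writes recovered edits under the region directory using zero-padded sequence ids: the writer opens a temp file named after the first seqid plus the source WAL name (0000000000000000001-wal-1.temp) and renames it to the highest seqid it wrote (0000000000000000002) on close. A sketch of just that naming convention; the 19-digit width is inferred from the names in this log, and the paths are the ones printed here:

    import org.apache.hadoop.fs.Path;

    public class RecoveredEditsNameSketch {
      // 19-digit zero padding, matching 0000000000000000002 below.
      static String formatSeqId(long seqId) {
        return String.format("%019d", seqId);
      }

      public static void main(String[] args) {
        Path regionDir = new Path(
            "hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/"
                + "c64e6e2dacd55e453a9b6f205172cbdb");
        // Temp name: first seqid in the source WAL, suffixed with that WAL's name.
        Path temp = new Path(regionDir, "recovered.edits/" + formatSeqId(1) + "-wal-1.temp");
        // Final name: the highest seqid the writer actually contained.
        Path finalName = new Path(regionDir, "recovered.edits/" + formatSeqId(2));
        System.out.println(temp + " -> " + finalName);
      }
    }

This naming is also what drives the conflict handling further down: splitting wal-2 produces 0000000000000000002-wal-2.temp, which collides with the already-renamed 0000000000000000002 and is deleted because it holds fewer entries.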
2024-11-10T10:31:54,024 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for c64e6e2dacd55e453a9b6f205172cbdb: Waiting for close lock at 1731234714023Disabling compacts and flushes for region at 1731234714023Disabling writes for close at 1731234714023Writing region close event to WAL at 1731234714024 (+1 ms)Closed at 1731234714024 2024-11-10T10:31:54,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741837_1013 (size=95) 2024-11-10T10:31:54,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741837_1013 (size=95) 2024-11-10T10:31:54,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741837_1013 (size=95) 2024-11-10T10:31:54,036 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-10T10:31:54,036 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-13205276:(num 1731234713967) 2024-11-10T10:31:54,038 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-10T10:31:54,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741839_1015 (size=320) 2024-11-10T10:31:54,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741839_1015 (size=320) 2024-11-10T10:31:54,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741839_1015 (size=320) 2024-11-10T10:31:54,058 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-10T10:31:54,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741840_1016 (size=253) 2024-11-10T10:31:54,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741840_1016 (size=253) 2024-11-10T10:31:54,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741840_1016 (size=253) 2024-11-10T10:31:54,096 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-1, size=320 (320bytes) 2024-11-10T10:31:54,097 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-10T10:31:54,097 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-10T10:31:54,097 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-1 2024-11-10T10:31:54,103 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-1 after 4ms 2024-11-10T10:31:54,110 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-1: 
isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:31:54,111 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-1 took 16ms 2024-11-10T10:31:54,124 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-1 so closing down 2024-11-10T10:31:54,124 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-10T10:31:54,127 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-11-10T10:31:54,129 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000001-wal-1.temp 2024-11-10T10:31:54,130 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-10T10:31:54,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741841_1017 (size=320) 2024-11-10T10:31:54,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741841_1017 (size=320) 2024-11-10T10:31:54,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741841_1017 (size=320) 2024-11-10T10:31:54,143 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-11-10T10:31:54,146 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002 2024-11-10T10:31:54,151 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 34 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-11-10T10:31:54,151 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-1, journal: Splitting hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-1, size=320 (320bytes) at 1731234714096Finishing writing output for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-1 so closing down at 1731234714124 (+28 ms)Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000001-wal-1.temp at 1731234714129 (+5 ms)3 split writer threads finished at 1731234714130 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1731234714143 (+13 ms)Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002 at 1731234714146 (+3 ms)Processed 2 edits across 1 Regions in 34 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1731234714151 (+5 ms) 2024-11-10T10:31:54,173 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-2, size=253 (253bytes) 2024-11-10T10:31:54,173 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-2 2024-11-10T10:31:54,174 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-2 after 1ms 2024-11-10T10:31:54,178 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:31:54,178 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-2 took 5ms 2024-11-10T10:31:54,180 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-2 so closing down 2024-11-10T10:31:54,180 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-10T10:31:54,183 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-11-10T10:31:54,184 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002-wal-2.temp 2024-11-10T10:31:54,185 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-10T10:31:54,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741842_1018 (size=253) 2024-11-10T10:31:54,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741842_1018 (size=253) 2024-11-10T10:31:54,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741842_1018 (size=253) 2024-11-10T10:31:54,195 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer 
path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms)
2024-11-10T10:31:54,199 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:31:54,201 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(184): Found existing old edits file and we have less entries. Deleting hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002-wal-2.temp, length=253
2024-11-10T10:31:54,203 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 25 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-2, size=253, length=253, corrupted=false, cancelled=false
2024-11-10T10:31:54,203 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-2, journal:
    Splitting hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-2, size=253 (253bytes) at 1731234714173
    Finishing writing output for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-2 so closing down at 1731234714180 (+7 ms)
    Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002-wal-2.temp at 1731234714185 (+5 ms)
    3 split writer threads finished at 1731234714185
    Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1731234714195 (+10 ms)
    Processed 1 edits across 1 Regions in 25 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1731234714203 (+8 ms)
2024-11-10T10:31:54,203 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor
2024-11-10T10:31:54,205 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
2024-11-10T10:31:54,225 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal.1731234714206, exclude list is [], retry=0
2024-11-10T10:31:54,232 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:31:54,233 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:31:54,233 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:31:54,240 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731234713954/wal.1731234714206
2024-11-10T10:31:54,240 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:37657:37657)]
2024-11-10T10:31:54,240 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => c64e6e2dacd55e453a9b6f205172cbdb, NAME => 'testReplayEditsWrittenIntoWAL,,1731234713957.c64e6e2dacd55e453a9b6f205172cbdb.', STARTKEY => '', ENDKEY => ''}
2024-11-10T10:31:54,241 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731234713957.c64e6e2dacd55e453a9b6f205172cbdb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:31:54,241 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for c64e6e2dacd55e453a9b6f205172cbdb
2024-11-10T10:31:54,241 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for c64e6e2dacd55e453a9b6f205172cbdb
2024-11-10T10:31:54,244 INFO [StoreOpener-c64e6e2dacd55e453a9b6f205172cbdb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region c64e6e2dacd55e453a9b6f205172cbdb
2024-11-10T10:31:54,247 INFO [StoreOpener-c64e6e2dacd55e453a9b6f205172cbdb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c64e6e2dacd55e453a9b6f205172cbdb columnFamilyName a
2024-11-10T10:31:54,247 DEBUG [StoreOpener-c64e6e2dacd55e453a9b6f205172cbdb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:54,248 INFO [StoreOpener-c64e6e2dacd55e453a9b6f205172cbdb-1 {}] regionserver.HStore(327): Store=c64e6e2dacd55e453a9b6f205172cbdb/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:31:54,248 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for c64e6e2dacd55e453a9b6f205172cbdb
2024-11-10T10:31:54,250 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb
2024-11-10T10:31:54,254 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb
2024-11-10T10:31:54,256 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002
2024-11-10T10:31:54,262 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:31:54,271 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002
2024-11-10T10:31:54,275 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing c64e6e2dacd55e453a9b6f205172cbdb 1/1 column families, dataSize=108 B heapSize=512 B
2024-11-10T10:31:54,330 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/.tmp/a/a3401ec21d8349adb15f0baba837e0b6 is 58, key is testReplayEditsWrittenIntoWAL/a:1/1731234714036/Put/seqid=0
2024-11-10T10:31:54,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741844_1020 (size=5170)
2024-11-10T10:31:54,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741844_1020 (size=5170)
2024-11-10T10:31:54,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741844_1020 (size=5170)
2024-11-10T10:31:54,345 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/.tmp/a/a3401ec21d8349adb15f0baba837e0b6
2024-11-10T10:31:54,390 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/.tmp/a/a3401ec21d8349adb15f0baba837e0b6 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/a/a3401ec21d8349adb15f0baba837e0b6
2024-11-10T10:31:54,399 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/a/a3401ec21d8349adb15f0baba837e0b6, entries=2, sequenceid=2, filesize=5.0 K
2024-11-10T10:31:54,404 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for c64e6e2dacd55e453a9b6f205172cbdb in 130ms, sequenceid=2, compaction requested=false; wal=null
2024-11-10T10:31:54,405 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/0000000000000000002
2024-11-10T10:31:54,406 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for c64e6e2dacd55e453a9b6f205172cbdb
2024-11-10T10:31:54,406 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for c64e6e2dacd55e453a9b6f205172cbdb
2024-11-10T10:31:54,409 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for c64e6e2dacd55e453a9b6f205172cbdb
2024-11-10T10:31:54,412 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/c64e6e2dacd55e453a9b6f205172cbdb/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1
2024-11-10T10:31:54,413 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened c64e6e2dacd55e453a9b6f205172cbdb; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63536853, jitterRate=-0.05322711169719696}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-10T10:31:54,414 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for c64e6e2dacd55e453a9b6f205172cbdb:
    Writing region info on filesystem at 1731234714241
    Initializing all the Stores at 1731234714243 (+2 ms)
    Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234714244 (+1 ms)
    Obtaining lock to block concurrent updates at 1731234714275 (+31 ms)
    Preparing flush snapshotting stores in c64e6e2dacd55e453a9b6f205172cbdb at 1731234714275
    Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1731234713957.c64e6e2dacd55e453a9b6f205172cbdb., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1731234714279 (+4 ms)
    Flushing stores of testReplayEditsWrittenIntoWAL,,1731234713957.c64e6e2dacd55e453a9b6f205172cbdb. at 1731234714279
    Flushing c64e6e2dacd55e453a9b6f205172cbdb/a: creating writer at 1731234714281 (+2 ms)
    Flushing c64e6e2dacd55e453a9b6f205172cbdb/a: appending metadata at 1731234714320 (+39 ms)
    Flushing c64e6e2dacd55e453a9b6f205172cbdb/a: closing flushed file at 1731234714323 (+3 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ced8883: reopening flushed file at 1731234714389 (+66 ms)
    Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for c64e6e2dacd55e453a9b6f205172cbdb in 130ms, sequenceid=2, compaction requested=false; wal=null at 1731234714404 (+15 ms)
    Cleaning up temporary data from old regions at 1731234714406 (+2 ms)
    Region opened successfully at 1731234714414 (+8 ms)
2024-11-10T10:31:54,440 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=374 (was 363)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:41232 [Waiting for operation #10]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: AsyncFSWAL-8-2
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019, type=LAST_IN_PIPELINE
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: AsyncFSWAL-8-3
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:51634 [Waiting for operation #10]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:47174 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
    app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:51734 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
    app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:41400 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
    app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019, type=LAST_IN_PIPELINE
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:39939/hbase-prefix:default
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019, type=LAST_IN_PIPELINE
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: TestAsyncWALReplay-pool-0
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: AsyncFSWAL-8-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=703 (was 611) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=237 (was 237), ProcessCount=11 (was 11), AvailableMemoryMB=9172 (was 9192)
2024-11-10T10:31:54,451 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=374, OpenFileDescriptor=703, MaxFileDescriptor=1048576, SystemLoadAverage=237, ProcessCount=11, AvailableMemoryMB=9171
2024-11-10T10:31:54,468 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-10T10:31:54,470 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-10T10:31:54,472 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-10T10:31:54,475 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-70586715, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/hregion-70586715, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
2024-11-10T10:31:54,488 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/hregion-70586715/hregion-70586715.1731234714475, exclude list is [], retry=0
2024-11-10T10:31:54,492 DEBUG [AsyncFSWAL-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:31:54,492 DEBUG [AsyncFSWAL-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:31:54,492 DEBUG [AsyncFSWAL-10-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:31:54,497 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-70586715/hregion-70586715.1731234714475
2024-11-10T10:31:54,498 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:38697:38697)]
2024-11-10T10:31:54,498 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 14a6c0997b4e757dec2989b6299cdd89, NAME => 'testReplayEditsWrittenIntoWAL,,1731234714469.14a6c0997b4e757dec2989b6299cdd89.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39939/hbase
2024-11-10T10:31:54,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741846_1022 (size=64)
2024-11-10T10:31:54,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741846_1022 (size=64)
2024-11-10T10:31:54,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741846_1022 (size=64)
2024-11-10T10:31:54,510 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731234714469.14a6c0997b4e757dec2989b6299cdd89.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:31:54,512 INFO [StoreOpener-14a6c0997b4e757dec2989b6299cdd89-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,514 INFO [StoreOpener-14a6c0997b4e757dec2989b6299cdd89-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14a6c0997b4e757dec2989b6299cdd89 columnFamilyName a
2024-11-10T10:31:54,514 DEBUG [StoreOpener-14a6c0997b4e757dec2989b6299cdd89-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:54,514 INFO [StoreOpener-14a6c0997b4e757dec2989b6299cdd89-1 {}] regionserver.HStore(327): Store=14a6c0997b4e757dec2989b6299cdd89/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:31:54,515 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,516 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,516 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,517 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,517 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,519 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,522 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-10T10:31:54,522 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 14a6c0997b4e757dec2989b6299cdd89; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73634500, jitterRate=0.09723955392837524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-10T10:31:54,523 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 14a6c0997b4e757dec2989b6299cdd89:
    Writing region info on filesystem at 1731234714510
    Initializing all the Stores at 1731234714511 (+1 ms)
    Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234714511
    Cleaning up temporary data from old regions at 1731234714517 (+6 ms)
    Region opened successfully at 1731234714523 (+6 ms)
2024-11-10T10:31:54,523 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 14a6c0997b4e757dec2989b6299cdd89, disabling compactions & flushes
2024-11-10T10:31:54,523 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1731234714469.14a6c0997b4e757dec2989b6299cdd89.
2024-11-10T10:31:54,523 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731234714469.14a6c0997b4e757dec2989b6299cdd89.
2024-11-10T10:31:54,523 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731234714469.14a6c0997b4e757dec2989b6299cdd89. after waiting 0 ms
2024-11-10T10:31:54,523 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731234714469.14a6c0997b4e757dec2989b6299cdd89.
2024-11-10T10:31:54,523 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731234714469.14a6c0997b4e757dec2989b6299cdd89.
2024-11-10T10:31:54,523 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 14a6c0997b4e757dec2989b6299cdd89:
    Waiting for close lock at 1731234714523
    Disabling compacts and flushes for region at 1731234714523
    Disabling writes for close at 1731234714523
    Writing region close event to WAL at 1731234714523
    Closed at 1731234714523
2024-11-10T10:31:54,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741845_1021 (size=95)
2024-11-10T10:31:54,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741845_1021 (size=95)
2024-11-10T10:31:54,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741845_1021 (size=95)
2024-11-10T10:31:54,530 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs
2024-11-10T10:31:54,530 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-70586715:(num 1731234714475)
2024-11-10T10:31:54,531 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor
2024-11-10T10:31:54,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741847_1023 (size=320)
2024-11-10T10:31:54,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741847_1023 (size=320)
2024-11-10T10:31:54,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741847_1023 (size=320)
2024-11-10T10:31:54,543 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor
2024-11-10T10:31:54,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741848_1024 (size=253)
2024-11-10T10:31:54,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741848_1024 (size=253)
2024-11-10T10:31:54,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741848_1024 (size=253)
2024-11-10T10:31:54,573 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-2, size=253 (253bytes)
2024-11-10T10:31:54,573 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-2
2024-11-10T10:31:54,574 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-2 after 1ms
2024-11-10T10:31:54,577 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:31:54,577 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-2 took 4ms
2024-11-10T10:31:54,580 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-2 so closing down
2024-11-10T10:31:54,580 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish
2024-11-10T10:31:54,582 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp
2024-11-10T10:31:54,583 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002-wal-2.temp
2024-11-10T10:31:54,584 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished
2024-11-10T10:31:54,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741849_1025 (size=253)
2024-11-10T10:31:54,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741849_1025 (size=253)
2024-11-10T10:31:54,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741849_1025 (size=253)
2024-11-10T10:31:54,593 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms)
2024-11-10T10:31:54,595 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002
2024-11-10T10:31:54,595 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 17 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-2, size=253, length=253, corrupted=false, cancelled=false
2024-11-10T10:31:54,595 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-2, journal:
    Splitting hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-2, size=253 (253bytes) at 1731234714573
    Finishing writing output for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-2 so closing down at 1731234714580 (+7 ms)
    Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002-wal-2.temp at 1731234714583 (+3 ms)
    3 split writer threads finished at 1731234714584 (+1 ms)
    Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1731234714593 (+9 ms)
    Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002 at 1731234714595 (+2 ms)
    Processed 1 edits across 1 Regions in 17 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1731234714595
2024-11-10T10:31:54,610 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-1, size=320 (320bytes)
2024-11-10T10:31:54,610 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-1
2024-11-10T10:31:54,611 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-1 after 1ms
2024-11-10T10:31:54,614 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:31:54,614 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-1 took 4ms
2024-11-10T10:31:54,616 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-1 so closing down
2024-11-10T10:31:54,617 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish
2024-11-10T10:31:54,618 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp
2024-11-10T10:31:54,620 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000001-wal-1.temp
2024-11-10T10:31:54,621 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished
2024-11-10T10:31:54,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741850_1026 (size=320)
2024-11-10T10:31:54,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741850_1026 (size=320)
2024-11-10T10:31:54,629 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms)
2024-11-10T10:31:54,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741850_1026 (size=320)
2024-11-10T10:31:54,633 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:31:54,635 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(175): Found existing old edits file. It could be the result of a previous failed split attempt or we have duplicated wal entries. Deleting hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002, length=253
2024-11-10T10:31:54,638 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002
2024-11-10T10:31:54,638 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-1, size=320, length=320, corrupted=false, cancelled=false
2024-11-10T10:31:54,638 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-1, journal:
    Splitting hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-1, size=320 (320bytes) at 1731234714610
    Finishing writing output for hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-1 so closing down at 1731234714617 (+7 ms)
    Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000001-wal-1.temp at 1731234714620 (+3 ms)
    3 split writer threads finished at 1731234714621 (+1 ms)
    Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1731234714629 (+8 ms)
    Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002 at 1731234714638 (+9 ms)
    Processed 2 edits across 1 Regions in 24 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1731234714638
2024-11-10T10:31:54,639 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor
2024-11-10T10:31:54,641 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
2024-11-10T10:31:54,655 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal.1731234714641, exclude list is [], retry=0
2024-11-10T10:31:54,659 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:31:54,659 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:31:54,660 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:31:54,662 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit1-manual,16010,1731234714467/wal.1731234714641
2024-11-10T10:31:54,662 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:37657:37657)]
2024-11-10T10:31:54,663 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 14a6c0997b4e757dec2989b6299cdd89, NAME => 'testReplayEditsWrittenIntoWAL,,1731234714469.14a6c0997b4e757dec2989b6299cdd89.', STARTKEY => '', ENDKEY => ''}
2024-11-10T10:31:54,663 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731234714469.14a6c0997b4e757dec2989b6299cdd89.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:31:54,663 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,663 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,665 INFO [StoreOpener-14a6c0997b4e757dec2989b6299cdd89-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,667 INFO [StoreOpener-14a6c0997b4e757dec2989b6299cdd89-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 14a6c0997b4e757dec2989b6299cdd89 columnFamilyName a
2024-11-10T10:31:54,667 DEBUG [StoreOpener-14a6c0997b4e757dec2989b6299cdd89-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:54,668 INFO [StoreOpener-14a6c0997b4e757dec2989b6299cdd89-1 {}] regionserver.HStore(327): Store=14a6c0997b4e757dec2989b6299cdd89/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:31:54,668 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,669 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,671 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,672 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002
2024-11-10T10:31:54,677 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:31:54,680 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002
2024-11-10T10:31:54,680 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 14a6c0997b4e757dec2989b6299cdd89 1/1 column families, dataSize=108 B heapSize=512 B
2024-11-10T10:31:54,703 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/.tmp/a/5b203b5b51fb437e9639c6055d183c7a is 58, key is testReplayEditsWrittenIntoWAL/a:1/1731234714530/Put/seqid=0
2024-11-10T10:31:54,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741852_1028 (size=5170)
2024-11-10T10:31:54,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741852_1028 (size=5170)
2024-11-10T10:31:54,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741852_1028 (size=5170)
2024-11-10T10:31:54,721 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/.tmp/a/5b203b5b51fb437e9639c6055d183c7a
2024-11-10T10:31:54,731 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/.tmp/a/5b203b5b51fb437e9639c6055d183c7a as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/a/5b203b5b51fb437e9639c6055d183c7a
2024-11-10T10:31:54,742 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/a/5b203b5b51fb437e9639c6055d183c7a, entries=2, sequenceid=2, filesize=5.0 K
2024-11-10T10:31:54,743 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 14a6c0997b4e757dec2989b6299cdd89 in 63ms, sequenceid=2, compaction requested=false; wal=null
2024-11-10T10:31:54,744 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/0000000000000000002
2024-11-10T10:31:54,745 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,745 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,747 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 14a6c0997b4e757dec2989b6299cdd89
2024-11-10T10:31:54,750 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/14a6c0997b4e757dec2989b6299cdd89/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1
2024-11-10T10:31:54,751 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 14a6c0997b4e757dec2989b6299cdd89; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68846204, jitterRate=0.025888383388519287}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-11-10T10:31:54,752 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 14a6c0997b4e757dec2989b6299cdd89:
    Writing region info on filesystem at 1731234714663
    Initializing all the Stores at 1731234714665 (+2 ms)
    Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234714665
    Obtaining lock to block concurrent updates at 1731234714680 (+15 ms)
    Preparing flush snapshotting stores in 14a6c0997b4e757dec2989b6299cdd89 at 1731234714680
    Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1731234714469.14a6c0997b4e757dec2989b6299cdd89., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1731234714680
    Flushing stores of testReplayEditsWrittenIntoWAL,,1731234714469.14a6c0997b4e757dec2989b6299cdd89. at 1731234714680
    Flushing 14a6c0997b4e757dec2989b6299cdd89/a: creating writer at 1731234714680
    Flushing 14a6c0997b4e757dec2989b6299cdd89/a: appending metadata at 1731234714701 (+21 ms)
    Flushing 14a6c0997b4e757dec2989b6299cdd89/a: closing flushed file at 1731234714701
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5ce8ee15: reopening flushed file at 1731234714730 (+29 ms)
    Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 14a6c0997b4e757dec2989b6299cdd89 in 63ms, sequenceid=2, compaction requested=false; wal=null at 1731234714743 (+13 ms)
    Cleaning up temporary data from old regions at 1731234714745 (+2 ms)
    Region opened successfully at 1731234714752 (+7 ms)
2024-11-10T10:31:54,770 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=384 (was 374)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:41462 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
    app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:41232 [Waiting for operation #14]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: AsyncFSWAL-10-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027, type=LAST_IN_PIPELINE
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367)
    app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:51634 [Waiting for operation #19]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:51784 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:47230 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:47016 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=785 (was 703) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=237 (was 237), ProcessCount=11 (was 11), AvailableMemoryMB=9165 (was 9171) 2024-11-10T10:31:54,782 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=384, OpenFileDescriptor=785, MaxFileDescriptor=1048576, SystemLoadAverage=237, ProcessCount=11, AvailableMemoryMB=9164 2024-11-10T10:31:54,798 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:31:54,800 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:31:54,801 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T10:31:54,804 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-86931423, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/hregion-86931423, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:31:54,817 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/hregion-86931423/hregion-86931423.1731234714804, exclude list is [], retry=0 2024-11-10T10:31:54,821 DEBUG [AsyncFSWAL-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:31:54,821 DEBUG [AsyncFSWAL-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:31:54,821 DEBUG [AsyncFSWAL-12-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:31:54,824 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-86931423/hregion-86931423.1731234714804 2024-11-10T10:31:54,824 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:41983:41983)] 2024-11-10T10:31:54,824 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 549ea961fab4adf47f3a9de1901a6781, NAME => 'testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39939/hbase 2024-11-10T10:31:54,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741854_1030 (size=64) 2024-11-10T10:31:54,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741854_1030 (size=64) 2024-11-10T10:31:54,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741854_1030 (size=64) 2024-11-10T10:31:55,237 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:31:55,239 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 549ea961fab4adf47f3a9de1901a6781 2024-11-10T10:31:55,241 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 549ea961fab4adf47f3a9de1901a6781 columnFamilyName a 2024-11-10T10:31:55,241 DEBUG [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:31:55,242 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(327): Store=549ea961fab4adf47f3a9de1901a6781/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:31:55,242 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 549ea961fab4adf47f3a9de1901a6781 2024-11-10T10:31:55,244 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 549ea961fab4adf47f3a9de1901a6781 columnFamilyName b 2024-11-10T10:31:55,244 DEBUG [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:31:55,245 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(327): Store=549ea961fab4adf47f3a9de1901a6781/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:31:55,245 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 549ea961fab4adf47f3a9de1901a6781 2024-11-10T10:31:55,246 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 549ea961fab4adf47f3a9de1901a6781 columnFamilyName c 2024-11-10T10:31:55,247 DEBUG [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:31:55,247 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(327): Store=549ea961fab4adf47f3a9de1901a6781/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:31:55,247 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 549ea961fab4adf47f3a9de1901a6781 2024-11-10T10:31:55,248 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781 2024-11-10T10:31:55,249 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781 2024-11-10T10:31:55,250 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 549ea961fab4adf47f3a9de1901a6781 2024-11-10T10:31:55,250 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 549ea961fab4adf47f3a9de1901a6781 
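The two HRegion(5546) probes above ("Found 0 recovered edits file(s)") are the replay bootstrap: before a region comes online it checks its directory for a recovered.edits folder left behind by WAL splitting, and replays whatever it finds. Below is a minimal sketch of that lookup using the plain Hadoop FileSystem API. The paths are taken from the log, but the class and helper names are hypothetical, and this is a simplification of HBase's actual scan (which also interprets the .seqid marker files rather than merely skipping them).

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RecoveredEditsScan {
  // Hypothetical helper: list replayable recovered-edits files for one region directory.
  static List<Path> listRecoveredEdits(FileSystem fs, Path regionDir) throws IOException {
    Path editsDir = new Path(regionDir, "recovered.edits");
    List<Path> result = new ArrayList<>();
    if (!fs.exists(editsDir)) {
      return result; // the "Found 0 recovered edits file(s)" case in the log
    }
    for (FileStatus st : fs.listStatus(editsDir)) {
      String name = st.getPath().getName();
      // Split output is named by sequence id (e.g. 0000000000000003002);
      // *.temp files are still being written and *.seqid files are markers.
      if (!name.endsWith(".temp") && !name.endsWith(".seqid")) {
        result.add(st.getPath());
      }
    }
    return result;
  }

  public static void main(String[] args) throws IOException {
    // Root and region directory as they appear in the log above.
    Path regionDir = new Path(
        "hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/"
            + "549ea961fab4adf47f3a9de1901a6781");
    try (FileSystem fs = FileSystem.get(regionDir.toUri(), new Configuration())) {
      System.out.println("Found " + listRecoveredEdits(fs, regionDir).size()
          + " recovered edits file(s) under " + regionDir);
    }
  }
}

The same probe running again after the WAL split further down is what finds the freshly renamed 0000000000000003002 file and triggers the replay.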
2024-11-10T10:31:55,251 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-10T10:31:55,252 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 549ea961fab4adf47f3a9de1901a6781 2024-11-10T10:31:55,255 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T10:31:55,255 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 549ea961fab4adf47f3a9de1901a6781; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65277747, jitterRate=-0.02728576958179474}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-10T10:31:55,256 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 549ea961fab4adf47f3a9de1901a6781: Writing region info on filesystem at 1731234715237Initializing all the Stores at 1731234715239 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234715239Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234715239Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234715239Cleaning up temporary data from old regions at 1731234715250 (+11 ms)Region opened successfully at 1731234715256 (+6 ms) 2024-11-10T10:31:55,256 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 549ea961fab4adf47f3a9de1901a6781, disabling compactions & flushes 2024-11-10T10:31:55,256 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781. 2024-11-10T10:31:55,256 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781. 2024-11-10T10:31:55,256 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781. after waiting 0 ms 2024-11-10T10:31:55,256 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781. 
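The FlushLargeStoresPolicy(65) message and the flushSizeLowerBound=44739242 in the subsequent open line are consistent with each other: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, the policy falls back to the region's memstore flush size divided by the number of column families. A sketch of that arithmetic, assuming the stock 128 MB hbase.hregion.memstore.flush.size:

public class FlushLowerBound {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L * 1024 * 1024; // assumed default hbase.hregion.memstore.flush.size
    int families = 3;                            // a, b and c in this table
    long lowerBound = memstoreFlushSize / families;
    // Prints 44739242, matching flushSizeLowerBound in the log; 44739242 bytes is the
    // "42.7 M" per-family threshold quoted by FlushLargeStoresPolicy.
    System.out.println(lowerBound);
  }
}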
2024-11-10T10:31:55,257 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781. 2024-11-10T10:31:55,257 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 549ea961fab4adf47f3a9de1901a6781: Waiting for close lock at 1731234715256Disabling compacts and flushes for region at 1731234715256Disabling writes for close at 1731234715256Writing region close event to WAL at 1731234715256Closed at 1731234715257 (+1 ms) 2024-11-10T10:31:55,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741853_1029 (size=95) 2024-11-10T10:31:55,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741853_1029 (size=95) 2024-11-10T10:31:55,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741853_1029 (size=95) 2024-11-10T10:31:55,263 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-10T10:31:55,263 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-86931423:(num 1731234714804) 2024-11-10T10:31:55,263 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-10T10:31:55,265 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:31:55,279 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265, exclude list is [], retry=0 2024-11-10T10:31:55,282 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:31:55,282 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:31:55,283 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:31:55,285 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265 2024-11-10T10:31:55,285 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:38697:38697)] 2024-11-10T10:31:55,479 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265, size=0 (0bytes) 2024-11-10T10:31:55,479 WARN [Time-limited test {}] wal.WALSplitter(453): File 
hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265 might be still open, length is 0 2024-11-10T10:31:55,479 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265 2024-11-10T10:31:55,480 WARN [IPC Server handler 3 on default port 39939 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741855_1031 2024-11-10T10:31:55,481 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265 after 1ms 2024-11-10T10:31:55,686 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:41490 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41490 dst: /127.0.0.1:44741 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44741 remote=/127.0.0.1:41490]. Total timeout mills is 60000, 59758 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
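The RecoverLeaseFSUtils entries above show the standard pattern for reading a WAL whose writer may still hold the HDFS lease: ask the NameNode to recover it, then poll until recovery completes. Here attempt=0 fails immediately (recovery for blk_1073741855_1031 has only just been scheduled) and, as the log shows later, attempt=1 succeeds about four seconds on. A minimal sketch of that loop against the HDFS client API; the class name, logging and fixed poll interval are illustrative, not HBase's exact implementation, which adds backoff and an overall timeout.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecovery {
  // Poll DistributedFileSystem#recoverLease until the file is closed and safe to read.
  static void recoverLease(DistributedFileSystem dfs, Path wal)
      throws IOException, InterruptedException {
    long start = System.currentTimeMillis();
    for (int attempt = 0;; attempt++) {
      boolean recovered = dfs.recoverLease(wal); // true once the file is closed
      long elapsedMs = System.currentTimeMillis() - start;
      if (recovered) {
        System.out.println("Recovered lease, attempt=" + attempt + " after " + elapsedMs + "ms");
        return;
      }
      System.out.println("Failed to recover lease, attempt=" + attempt
          + " on file=" + wal + " after " + elapsedMs + "ms");
      Thread.sleep(4000); // the log shows roughly this gap between attempts
    }
  }
}

Running this requires a live HDFS client; in a test like this one, recovery completes once the datanodes report the finalized block, which is exactly the addStoredBlock burst for blk_1073741855_1032 a few entries below.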
2024-11-10T10:31:55,687 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:47280 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:43217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47280 dst: /127.0.0.1:43217 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T10:31:55,688 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:51820 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:44251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51820 dst: /127.0.0.1:44251 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
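The three DataXceiver errors above are the expected fallout of that lease recovery rather than an independent failure: the abandoned write pipeline is torn down, so each datanode's PacketReceiver hits end-of-stream in the middle of a packet, and Hadoop's IOUtils.readFully converts the short read into the "Premature EOF from inputStream" seen here. A toy reproduction of that conversion; readFully below mirrors the contract of org.apache.hadoop.io.IOUtils.readFully but is not the Hadoop source:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class PrematureEof {
  // Same contract as org.apache.hadoop.io.IOUtils.readFully: a short read is an error.
  static void readFully(InputStream in, byte[] buf, int off, int len) throws IOException {
    while (len > 0) {
      int n = in.read(buf, off, len);
      if (n < 0) {
        throw new IOException("Premature EOF from inputStream");
      }
      off += n;
      len -= n;
    }
  }

  public static void main(String[] args) throws IOException {
    // A "packet" that claims 8 bytes, but the peer hung up after sending 3.
    InputStream truncated = new ByteArrayInputStream(new byte[] {1, 2, 3});
    readFully(truncated, new byte[8], 0, 8); // throws: Premature EOF from inputStream
  }
}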
2024-11-10T10:31:55,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741855_1032 (size=263633) 2024-11-10T10:31:55,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741855_1032 (size=263633) 2024-11-10T10:31:55,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741855_1032 (size=263633) 2024-11-10T10:31:59,012 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-10T10:31:59,060 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-10T10:31:59,482 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265 after 4002ms 2024-11-10T10:31:59,486 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:31:59,487 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265 took 4009ms 2024-11-10T10:31:59,492 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1731234715265.temp 2024-11-10T10:31:59,494 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000000001-wal.1731234715265.temp 2024-11-10T10:31:59,615 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265; continuing. 
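Note the file-name discipline in the split writer above: output starts life as <first-seqid>-<wal-name>.temp and, once closed, is renamed to just the highest sequence id it contains (0000000000000003002 a few lines down), zero-padded to a fixed width so that lexicographic order equals replay order. A small sketch of that naming convention; the 19-digit width is inferred from the names in this log and the class name is hypothetical:

public class RecoveredEditsNames {
  // Zero-pad so string sort order matches numeric sequence-id order.
  static String formatSeqId(long seqId) {
    return String.format("%019d", seqId);
  }

  public static void main(String[] args) {
    String walName = "wal.1731234715265";
    long firstSeqId = 1L;
    long maxSeqId = 3002L;
    System.out.println(formatSeqId(firstSeqId) + "-" + walName + ".temp");
    // -> 0000000000000000001-wal.1731234715265.temp (in-progress writer)
    System.out.println(formatSeqId(maxSeqId));
    // -> 0000000000000003002 (final name after the rename in the log)
  }
}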
2024-11-10T10:31:59,615 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265 so closing down 2024-11-10T10:31:59,616 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-10T10:31:59,616 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-10T10:31:59,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741856_1033 (size=263641) 2024-11-10T10:31:59,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741856_1033 (size=263641) 2024-11-10T10:31:59,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741856_1033 (size=263641) 2024-11-10T10:31:59,621 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000000001-wal.1731234715265.temp (wrote 3002 edits, skipped 0 edits in 77 ms) 2024-11-10T10:31:59,623 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000000001-wal.1731234715265.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000003002 2024-11-10T10:31:59,623 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3002 edits across 1 Regions in 135 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265, size=0, length=0, corrupted=false, cancelled=false 2024-11-10T10:31:59,623 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265, journal: Splitting hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265, size=0 (0bytes) at 1731234715479Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000000001-wal.1731234715265.temp at 1731234719494 (+4015 ms)Split 1024 edits, skipped 0 edits. at 1731234719552 (+58 ms)Split 2048 edits, skipped 0 edits. 
at 1731234719586 (+34 ms)Finishing writing output for hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265 so closing down at 1731234719615 (+29 ms)3 split writer threads finished at 1731234719616 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000000001-wal.1731234715265.temp (wrote 3002 edits, skipped 0 edits in 77 ms) at 1731234719621 (+5 ms)Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000000001-wal.1731234715265.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000003002 at 1731234719623 (+2 ms)Processed 3002 edits across 1 Regions in 135 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265, size=0, length=0, corrupted=false, cancelled=false at 1731234719623 2024-11-10T10:31:59,626 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234715265 to hdfs://localhost:39939/hbase/oldWALs/wal.1731234715265 2024-11-10T10:31:59,627 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000003002 2024-11-10T10:31:59,628 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-10T10:31:59,630 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:31:59,652 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234719631, exclude list is [], retry=0 2024-11-10T10:31:59,656 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:31:59,657 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:31:59,657 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:31:59,659 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731234714797/wal.1731234719631 2024-11-10T10:31:59,660 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697)] 2024-11-10T10:31:59,660 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:31:59,662 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 549ea961fab4adf47f3a9de1901a6781 2024-11-10T10:31:59,664 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 549ea961fab4adf47f3a9de1901a6781 columnFamilyName a 2024-11-10T10:31:59,664 DEBUG [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:31:59,664 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(327): Store=549ea961fab4adf47f3a9de1901a6781/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:31:59,665 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 549ea961fab4adf47f3a9de1901a6781 2024-11-10T10:31:59,666 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 549ea961fab4adf47f3a9de1901a6781 columnFamilyName b 2024-11-10T10:31:59,666 DEBUG [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:31:59,666 INFO 
[StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(327): Store=549ea961fab4adf47f3a9de1901a6781/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:31:59,666 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 549ea961fab4adf47f3a9de1901a6781
2024-11-10T10:31:59,668 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 549ea961fab4adf47f3a9de1901a6781 columnFamilyName c
2024-11-10T10:31:59,668 DEBUG [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:31:59,668 INFO [StoreOpener-549ea961fab4adf47f3a9de1901a6781-1 {}] regionserver.HStore(327): Store=549ea961fab4adf47f3a9de1901a6781/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:31:59,668 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 549ea961fab4adf47f3a9de1901a6781
2024-11-10T10:31:59,669 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781
2024-11-10T10:31:59,671 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781
2024-11-10T10:31:59,672 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000003002
2024-11-10T10:31:59,675 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000003002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:31:59,724 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked
2024-11-10T10:32:00,099 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 549ea961fab4adf47f3a9de1901a6781 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB
2024-11-10T10:32:00,136 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/a/f223230fba0341bdb17bcb99a7d43666 is 62, key is testReplayEditsWrittenIntoWAL/a:100/1731234715291/Put/seqid=0
2024-11-10T10:32:00,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741858_1035 (size=50463)
2024-11-10T10:32:00,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741858_1035 (size=50463)
2024-11-10T10:32:00,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741858_1035 (size=50463)
2024-11-10T10:32:00,146 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=754 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/a/f223230fba0341bdb17bcb99a7d43666
2024-11-10T10:32:00,155 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/a/f223230fba0341bdb17bcb99a7d43666 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/a/f223230fba0341bdb17bcb99a7d43666
2024-11-10T10:32:00,161 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/a/f223230fba0341bdb17bcb99a7d43666, entries=754, sequenceid=754, filesize=49.3 K
2024-11-10T10:32:00,162 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984, currentSize=0 B/0 for 549ea961fab4adf47f3a9de1901a6781 in 64ms, sequenceid=754, compaction requested=false; wal=null
2024-11-10T10:32:00,179 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked
2024-11-10T10:32:00,180 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 549ea961fab4adf47f3a9de1901a6781 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB
2024-11-10T10:32:00,187 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/a/53f832ff40c2466ebb1d52bca8bde7fb is 62, key is testReplayEditsWrittenIntoWAL/a:754/1731234715322/Put/seqid=0
2024-11-10T10:32:00,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741859_1036 (size=20072)
2024-11-10T10:32:00,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741859_1036 (size=20072)
2024-11-10T10:32:00,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741859_1036 (size=20072)
2024-11-10T10:32:00,196 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.93 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/a/53f832ff40c2466ebb1d52bca8bde7fb
2024-11-10T10:32:00,225 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/b/7f65140c826e4297a49bee93a2ac14d0 is 62, key is testReplayEditsWrittenIntoWAL/b:100/1731234715357/Put/seqid=0
2024-11-10T10:32:00,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741860_1037 (size=35835)
2024-11-10T10:32:00,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741860_1037 (size=35835)
2024-11-10T10:32:00,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741860_1037 (size=35835)
2024-11-10T10:32:00,236 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28.56 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/b/7f65140c826e4297a49bee93a2ac14d0
2024-11-10T10:32:00,244 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/a/53f832ff40c2466ebb1d52bca8bde7fb as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/a/53f832ff40c2466ebb1d52bca8bde7fb
2024-11-10T10:32:00,251 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/a/53f832ff40c2466ebb1d52bca8bde7fb, entries=246, sequenceid=1508, filesize=19.6 K
2024-11-10T10:32:00,254 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/b/7f65140c826e4297a49bee93a2ac14d0 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/b/7f65140c826e4297a49bee93a2ac14d0
2024-11-10T10:32:00,261 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/b/7f65140c826e4297a49bee93a2ac14d0, entries=508, sequenceid=1508, filesize=35.0 K
2024-11-10T10:32:00,261 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 549ea961fab4adf47f3a9de1901a6781 in 82ms, sequenceid=1508, compaction requested=false; wal=null
2024-11-10T10:32:00,273 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked
2024-11-10T10:32:00,273 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 549ea961fab4adf47f3a9de1901a6781 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB
2024-11-10T10:32:00,280 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/b/104b266df586480784e56f62b94a13c6 is 62, key is testReplayEditsWrittenIntoWAL/b:508/1731234715380/Put/seqid=0
2024-11-10T10:32:00,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741861_1038 (size=35082)
2024-11-10T10:32:00,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741861_1038 (size=35082)
2024-11-10T10:32:00,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741861_1038 (size=35082)
2024-11-10T10:32:00,293 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.87 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/b/104b266df586480784e56f62b94a13c6
2024-11-10T10:32:00,321 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/c/6e91dc53870843748c8d7c9cf2caf4d9 is 62, key is testReplayEditsWrittenIntoWAL/c:100/1731234715407/Put/seqid=0
2024-11-10T10:32:00,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741862_1039 (size=20825)
2024-11-10T10:32:00,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741862_1039 (size=20825)
2024-11-10T10:32:00,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741862_1039 (size=20825)
2024-11-10T10:32:00,330 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.63 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/c/6e91dc53870843748c8d7c9cf2caf4d9
2024-11-10T10:32:00,337 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/b/104b266df586480784e56f62b94a13c6 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/b/104b266df586480784e56f62b94a13c6
2024-11-10T10:32:00,343 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/b/104b266df586480784e56f62b94a13c6, entries=492, sequenceid=2262, filesize=34.3 K
2024-11-10T10:32:00,344 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/c/6e91dc53870843748c8d7c9cf2caf4d9 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/c/6e91dc53870843748c8d7c9cf2caf4d9
2024-11-10T10:32:00,350 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/c/6e91dc53870843748c8d7c9cf2caf4d9, entries=262, sequenceid=2262, filesize=20.3 K
2024-11-10T10:32:00,350 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 549ea961fab4adf47f3a9de1901a6781 in 77ms, sequenceid=2262, compaction requested=false; wal=null
2024-11-10T10:32:00,361 WARN [Time-limited test {}] regionserver.HRegion(5722): No family for cell testReplayEditsWrittenIntoWAL/another family:testReplayEditsWrittenIntoWAL/1731234715442/Put/vlen=29/seqid=0 in region testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781.
2024-11-10T10:32:00,364 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3001, skipped 1, firstSequenceIdInLog=1, maxSequenceIdInLog=3002, path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000003002
2024-11-10T10:32:00,364 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked
2024-11-10T10:32:00,364 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 549ea961fab4adf47f3a9de1901a6781 3/3 column families, dataSize=41.85 KB heapSize=98.89 KB
2024-11-10T10:32:00,371 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/c/e94fcb7dd8454a70ab5c3839590bd70f is 62, key is testReplayEditsWrittenIntoWAL/c:262/1731234715413/Put/seqid=0
2024-11-10T10:32:00,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741863_1040 (size=50301)
2024-11-10T10:32:00,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741863_1040 (size=50301)
2024-11-10T10:32:00,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741863_1040 (size=50301)
2024-11-10T10:32:00,384 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=41.85 KB at sequenceid=3002 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/c/e94fcb7dd8454a70ab5c3839590bd70f
2024-11-10T10:32:00,393 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e94fcb7dd8454a70ab5c3839590bd70f
2024-11-10T10:32:00,394 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/.tmp/c/e94fcb7dd8454a70ab5c3839590bd70f as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/c/e94fcb7dd8454a70ab5c3839590bd70f
2024-11-10T10:32:00,401 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e94fcb7dd8454a70ab5c3839590bd70f
2024-11-10T10:32:00,401 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/c/e94fcb7dd8454a70ab5c3839590bd70f, entries=739, sequenceid=3002, filesize=49.1 K
2024-11-10T10:32:00,401 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~41.85 KB/42854, heapSize ~98.38 KB/100736, currentSize=0 B/0 for 549ea961fab4adf47f3a9de1901a6781 in 37ms, sequenceid=3002, compaction requested=false; wal=null
2024-11-10T10:32:00,402 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/0000000000000003002
2024-11-10T10:32:00,403 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 549ea961fab4adf47f3a9de1901a6781
2024-11-10T10:32:00,404 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 549ea961fab4adf47f3a9de1901a6781
2024-11-10T10:32:00,404 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead.
2024-11-10T10:32:00,406 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 549ea961fab4adf47f3a9de1901a6781
2024-11-10T10:32:00,409 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenIntoWAL/549ea961fab4adf47f3a9de1901a6781/recovered.edits/3002.seqid, newMaxSeqId=3002, maxSeqId=1
2024-11-10T10:32:00,410 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 549ea961fab4adf47f3a9de1901a6781; next sequenceid=3003; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=204800, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59963440, jitterRate=-0.1064751148223877}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216}
2024-11-10T10:32:00,410 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 549ea961fab4adf47f3a9de1901a6781: Writing region info on filesystem at 1731234719660Initializing all the Stores at 1731234719662 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234719662Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234719662Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234719662Cleaning up temporary data from old regions at 1731234720404 (+742 ms)Region opened successfully at 1731234720410 (+6 ms)
2024-11-10T10:32:00,474 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 549ea961fab4adf47f3a9de1901a6781, disabling compactions & flushes
2024-11-10T10:32:00,474 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781.
2024-11-10T10:32:00,474 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781.
2024-11-10T10:32:00,474 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781. after waiting 0 ms
2024-11-10T10:32:00,474 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781.
2024-11-10T10:32:00,477 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731234714799.549ea961fab4adf47f3a9de1901a6781.
2024-11-10T10:32:00,477 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 549ea961fab4adf47f3a9de1901a6781: Waiting for close lock at 1731234720474Disabling compacts and flushes for region at 1731234720474Disabling writes for close at 1731234720474Writing region close event to WAL at 1731234720477 (+3 ms)Closed at 1731234720477
2024-11-10T10:32:00,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741857_1034 (size=95)
2024-11-10T10:32:00,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741857_1034 (size=95)
2024-11-10T10:32:00,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741857_1034 (size=95)
2024-11-10T10:32:00,485 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs
2024-11-10T10:32:00,485 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731234719631)
2024-11-10T10:32:00,506 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=398 (was 384)
Potentially hanging thread: AsyncFSWAL-12-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:41313 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: AsyncFSWAL-12-2
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:39939 from jenkins.replay.wal.secondtime
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data3
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1552928825_22 at /127.0.0.1:47370 [Waiting for operation #16]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: AsyncFSWAL-12-3
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data4
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1552928825_22 at /127.0.0.1:51846 [Waiting for operation #3]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42563
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:42563 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: Timer for 'HBase' metrics system
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Potentially hanging thread: LeaseRenewer:jenkins.replay.wal.secondtime@localhost:39939
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1552928825_22 at /127.0.0.1:41528 [Waiting for operation #20]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1552928825_22 at /127.0.0.1:47344 [Waiting for operation #9]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41313
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=865 (was 785) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=218 (was 237), ProcessCount=11 (was 11), AvailableMemoryMB=9105 (was 9164)
2024-11-10T10:32:00,519 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=398, OpenFileDescriptor=865, MaxFileDescriptor=1048576, SystemLoadAverage=218, ProcessCount=11, AvailableMemoryMB=9102
2024-11-10T10:32:00,535 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-10T10:32:00,537 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-10T10:32:00,538 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-10T10:32:00,541 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-64462690, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/hregion-64462690, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
2024-11-10T10:32:00,554 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/hregion-64462690/hregion-64462690.1731234720541, exclude list is [], retry=0
2024-11-10T10:32:00,557 DEBUG [AsyncFSWAL-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:32:00,558 DEBUG [AsyncFSWAL-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:32:00,558 DEBUG [AsyncFSWAL-14-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:32:00,560 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-64462690/hregion-64462690.1731234720541
2024-11-10T10:32:00,561 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697)]
2024-11-10T10:32:00,562 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 41704dd28349736b94466900f0c357bb, NAME => 'test2727,,1731234720536.41704dd28349736b94466900f0c357bb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test2727', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39939/hbase
2024-11-10T10:32:00,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741865_1042 (size=43)
2024-11-10T10:32:00,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741865_1042 (size=43)
2024-11-10T10:32:00,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741865_1042 (size=43)
2024-11-10T10:32:00,574 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1731234720536.41704dd28349736b94466900f0c357bb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:32:00,576 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 41704dd28349736b94466900f0c357bb
2024-11-10T10:32:00,578 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 41704dd28349736b94466900f0c357bb columnFamilyName a
2024-11-10T10:32:00,578 DEBUG [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:00,579 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(327): Store=41704dd28349736b94466900f0c357bb/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:00,579 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 41704dd28349736b94466900f0c357bb
2024-11-10T10:32:00,580 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 41704dd28349736b94466900f0c357bb columnFamilyName b
2024-11-10T10:32:00,581 DEBUG [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:00,581 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(327): Store=41704dd28349736b94466900f0c357bb/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:00,581 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 41704dd28349736b94466900f0c357bb
2024-11-10T10:32:00,583 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 41704dd28349736b94466900f0c357bb columnFamilyName c
2024-11-10T10:32:00,583 DEBUG [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:00,584 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(327): Store=41704dd28349736b94466900f0c357bb/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:00,584 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 41704dd28349736b94466900f0c357bb
2024-11-10T10:32:00,585 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb
2024-11-10T10:32:00,585 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb
2024-11-10T10:32:00,586 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 41704dd28349736b94466900f0c357bb
2024-11-10T10:32:00,587 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 41704dd28349736b94466900f0c357bb
2024-11-10T10:32:00,587 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead.
2024-11-10T10:32:00,588 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 41704dd28349736b94466900f0c357bb
2024-11-10T10:32:00,591 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-10T10:32:00,591 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 41704dd28349736b94466900f0c357bb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64233918, jitterRate=-0.042840033769607544}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242}
2024-11-10T10:32:00,592 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 41704dd28349736b94466900f0c357bb: Writing region info on filesystem at 1731234720574Initializing all the Stores at 1731234720575 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234720575Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234720576 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234720576Cleaning up temporary data from old regions at 1731234720587 (+11 ms)Region opened successfully at 1731234720592 (+5 ms)
2024-11-10T10:32:00,592 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 41704dd28349736b94466900f0c357bb, disabling compactions & flushes
2024-11-10T10:32:00,592 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1731234720536.41704dd28349736b94466900f0c357bb.
2024-11-10T10:32:00,592 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1731234720536.41704dd28349736b94466900f0c357bb.
2024-11-10T10:32:00,592 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1731234720536.41704dd28349736b94466900f0c357bb. after waiting 0 ms
2024-11-10T10:32:00,592 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1731234720536.41704dd28349736b94466900f0c357bb.
2024-11-10T10:32:00,593 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1731234720536.41704dd28349736b94466900f0c357bb.
2024-11-10T10:32:00,593 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 41704dd28349736b94466900f0c357bb: Waiting for close lock at 1731234720592Disabling compacts and flushes for region at 1731234720592Disabling writes for close at 1731234720592Writing region close event to WAL at 1731234720593 (+1 ms)Closed at 1731234720593
2024-11-10T10:32:00,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741864_1041 (size=95)
2024-11-10T10:32:00,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741864_1041 (size=95)
2024-11-10T10:32:00,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741864_1041 (size=95)
2024-11-10T10:32:00,600 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs
2024-11-10T10:32:00,600 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-64462690:(num 1731234720541)
2024-11-10T10:32:00,600 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor
2024-11-10T10:32:00,602 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
2024-11-10T10:32:00,621 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602, exclude list is [], retry=0
2024-11-10T10:32:00,624 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:32:00,624 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:32:00,625 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:32:00,627 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602
2024-11-10T10:32:00,628 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697)]
2024-11-10T10:32:00,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741866_1043 (size=263359)
2024-11-10T10:32:00,783 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602 not finished, retry = 0
2024-11-10T10:32:00,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741866_1043 (size=263359)
2024-11-10T10:32:00,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741866_1043 (size=263359)
2024-11-10T10:32:00,903 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602, size=257.2 K (263359bytes)
2024-11-10T10:32:00,903 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602
2024-11-10T10:32:00,903 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602 after 0ms
2024-11-10T10:32:00,908 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:32:00,910 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602 took 8ms
2024-11-10T10:32:00,916 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1731234720602.temp
2024-11-10T10:32:00,918 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000000001-wal.1731234720602.temp
2024-11-10T10:32:00,980 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602 so closing down
2024-11-10T10:32:00,980 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish
2024-11-10T10:32:00,980 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished
2024-11-10T10:32:00,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741867_1044 (size=263359)
2024-11-10T10:32:00,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741867_1044 (size=263359)
2024-11-10T10:32:00,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741867_1044 (size=263359)
2024-11-10T10:32:00,985 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000000001-wal.1731234720602.temp (wrote 3000 edits, skipped 0 edits in 41 ms)
2024-11-10T10:32:00,987 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000000001-wal.1731234720602.temp to hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003000
2024-11-10T10:32:00,987 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 76 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602, size=257.2 K, length=263359, corrupted=false, cancelled=false
2024-11-10T10:32:00,988 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602, journal: Splitting hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602, size=257.2 K (263359bytes) at 1731234720903Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000000001-wal.1731234720602.temp at 1731234720918 (+15 ms)Split 1024 edits, skipped 0 edits. at 1731234720935 (+17 ms)Split 2048 edits, skipped 0 edits. at 1731234720957 (+22 ms)Finishing writing output for hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602 so closing down at 1731234720980 (+23 ms)3 split writer threads finished at 1731234720980Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000000001-wal.1731234720602.temp (wrote 3000 edits, skipped 0 edits in 41 ms) at 1731234720985 (+5 ms)Rename recovered edits hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000000001-wal.1731234720602.temp to hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003000 at 1731234720987 (+2 ms)Processed 3000 edits across 1 Regions in 76 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602, size=257.2 K, length=263359, corrupted=false, cancelled=false at 1731234720988 (+1 ms)
2024-11-10T10:32:00,991 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720602 to hdfs://localhost:39939/hbase/oldWALs/wal.1731234720602
2024-11-10T10:32:00,992 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003000
2024-11-10T10:32:00,993 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor
2024-11-10T10:32:00,995 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
2024-11-10T10:32:01,015 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996, exclude list is [], retry=0
2024-11-10T10:32:01,027 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:32:01,027 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:32:01,028 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:32:01,033 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996
2024-11-10T10:32:01,033 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657)]
2024-11-10T10:32:01,163 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-10T10:32:01,163 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-10T10:32:01,166 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-10T10:32:01,166 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-10T10:32:01,166 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-10T10:32:01,166 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-10T10:32:01,168 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL
2024-11-10T10:32:01,168 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL Metrics about Tables on a single HBase RegionServer
2024-11-10T10:32:01,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741868_1045 (size=263486)
2024-11-10T10:32:01,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741868_1045 (size=263486)
2024-11-10T10:32:01,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741868_1045 (size=263486)
2024-11-10T10:32:01,223 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996, size=257.3 K (263486bytes)
2024-11-10T10:32:01,223 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996
2024-11-10T10:32:01,224 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996 after 1ms
2024-11-10T10:32:01,226 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:32:01,228 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996 took 5ms
2024-11-10T10:32:01,232 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000003001-wal.1731234720996.temp
2024-11-10T10:32:01,234 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003001-wal.1731234720996.temp
2024-11-10T10:32:01,292 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996 so closing down
2024-11-10T10:32:01,292 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish
2024-11-10T10:32:01,293 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished
2024-11-10T10:32:01,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741869_1046 (size=263486)
2024-11-10T10:32:01,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741869_1046 (size=263486)
2024-11-10T10:32:01,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741869_1046 (size=263486)
2024-11-10T10:32:01,297 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003001-wal.1731234720996.temp (wrote 3000 edits, skipped 0 edits in 44 ms)
2024-11-10T10:32:01,299 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003001-wal.1731234720996.temp to hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000006000
2024-11-10T10:32:01,299 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 70 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996, size=257.3 K, length=263486, corrupted=false, cancelled=false
2024-11-10T10:32:01,299 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996, journal: Splitting hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996, size=257.3 K (263486bytes) at 1731234721223Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003001-wal.1731234720996.temp at 1731234721234 (+11 ms)Split 1024 edits, skipped 0 edits. at 1731234721251 (+17 ms)Split 2048 edits, skipped 0 edits. at 1731234721273 (+22 ms)Finishing writing output for hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996 so closing down at 1731234721292 (+19 ms)3 split writer threads finished at 1731234721293 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003001-wal.1731234720996.temp (wrote 3000 edits, skipped 0 edits in 44 ms) at 1731234721297 (+4 ms)Rename recovered edits hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003001-wal.1731234720996.temp to hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000006000 at 1731234721299 (+2 ms)Processed 3000 edits across 1 Regions in 70 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996, size=257.3 K, length=263486, corrupted=false, cancelled=false at 1731234721299
2024-11-10T10:32:01,301 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234720996 to hdfs://localhost:39939/hbase/oldWALs/wal.1731234720996
2024-11-10T10:32:01,302 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000006000
2024-11-10T10:32:01,302 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor
2024-11-10T10:32:01,304 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/test2727-manual,16010,1731234720534, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
2024-11-10T10:32:01,321 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234721305, exclude list is [], retry=0
2024-11-10T10:32:01,324 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:32:01,325 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:32:01,325 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:32:01,327 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1731234720534/wal.1731234721305
2024-11-10T10:32:01,328 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:38697:38697)]
2024-11-10T10:32:01,328 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 41704dd28349736b94466900f0c357bb, NAME => 'test2727,,1731234720536.41704dd28349736b94466900f0c357bb.', STARTKEY => '', ENDKEY => ''}
2024-11-10T10:32:01,328 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1731234720536.41704dd28349736b94466900f0c357bb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:32:01,329 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 41704dd28349736b94466900f0c357bb
2024-11-10T10:32:01,329 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 41704dd28349736b94466900f0c357bb
2024-11-10T10:32:01,330 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 41704dd28349736b94466900f0c357bb
2024-11-10T10:32:01,331 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 41704dd28349736b94466900f0c357bb columnFamilyName a
2024-11-10T10:32:01,331 DEBUG [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:01,332 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(327): Store=41704dd28349736b94466900f0c357bb/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:01,332 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 41704dd28349736b94466900f0c357bb
2024-11-10T10:32:01,333 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}]
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 41704dd28349736b94466900f0c357bb columnFamilyName b 2024-11-10T10:32:01,333 DEBUG [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:01,334 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(327): Store=41704dd28349736b94466900f0c357bb/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:01,334 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 41704dd28349736b94466900f0c357bb 2024-11-10T10:32:01,335 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 41704dd28349736b94466900f0c357bb columnFamilyName c 2024-11-10T10:32:01,335 DEBUG [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:01,336 INFO [StoreOpener-41704dd28349736b94466900f0c357bb-1 {}] regionserver.HStore(327): Store=41704dd28349736b94466900f0c357bb/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:01,336 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 41704dd28349736b94466900f0c357bb 2024-11-10T10:32:01,337 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb 2024-11-10T10:32:01,339 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 2 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb 2024-11-10T10:32:01,340 INFO [Time-limited test {}] regionserver.HRegion(5613): 
Replaying edits from hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003000 2024-11-10T10:32:01,342 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:32:01,392 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=3000, path=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003000 2024-11-10T10:32:01,394 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000006000 2024-11-10T10:32:01,397 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000006000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:32:01,452 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=3001, maxSequenceIdInLog=6000, path=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000006000 2024-11-10T10:32:01,452 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 41704dd28349736b94466900f0c357bb 3/3 column families, dataSize=215.51 KB heapSize=657 KB 2024-11-10T10:32:01,489 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/.tmp/a/714831c0bf204a55a0e2bdbc1a360e6b is 41, key is test2727/a:100/1731234721039/Put/seqid=0 2024-11-10T10:32:01,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741871_1048 (size=84227) 2024-11-10T10:32:01,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741871_1048 (size=84227) 2024-11-10T10:32:01,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741871_1048 (size=84227) 2024-11-10T10:32:01,503 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/.tmp/a/714831c0bf204a55a0e2bdbc1a360e6b 2024-11-10T10:32:01,542 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/.tmp/b/62207b82e03b460483e4125fc2178474 is 41, key is test2727/b:100/1731234721110/Put/seqid=0 2024-11-10T10:32:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741872_1049 (size=84609) 2024-11-10T10:32:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741872_1049 (size=84609) 
2024-11-10T10:32:01,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741872_1049 (size=84609) 2024-11-10T10:32:01,554 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/.tmp/b/62207b82e03b460483e4125fc2178474 2024-11-10T10:32:01,594 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/.tmp/c/d3a1e17b81b642048aa00727e52f6623 is 41, key is test2727/c:100/1731234721157/Put/seqid=0 2024-11-10T10:32:01,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741873_1050 (size=84609) 2024-11-10T10:32:01,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741873_1050 (size=84609) 2024-11-10T10:32:01,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741873_1050 (size=84609) 2024-11-10T10:32:01,624 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/.tmp/c/d3a1e17b81b642048aa00727e52f6623 2024-11-10T10:32:01,633 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/.tmp/a/714831c0bf204a55a0e2bdbc1a360e6b as hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/a/714831c0bf204a55a0e2bdbc1a360e6b 2024-11-10T10:32:01,642 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/a/714831c0bf204a55a0e2bdbc1a360e6b, entries=2000, sequenceid=6000, filesize=82.3 K 2024-11-10T10:32:01,644 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/.tmp/b/62207b82e03b460483e4125fc2178474 as hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/b/62207b82e03b460483e4125fc2178474 2024-11-10T10:32:01,654 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/b/62207b82e03b460483e4125fc2178474, entries=2000, sequenceid=6000, filesize=82.6 K 2024-11-10T10:32:01,655 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/.tmp/c/d3a1e17b81b642048aa00727e52f6623 as hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/c/d3a1e17b81b642048aa00727e52f6623 2024-11-10T10:32:01,663 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/c/d3a1e17b81b642048aa00727e52f6623, entries=2000, sequenceid=6000, filesize=82.6 K 2024-11-10T10:32:01,664 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~215.51 KB/220680, 
heapSize ~656.95 KB/672720, currentSize=0 B/0 for 41704dd28349736b94466900f0c357bb in 211ms, sequenceid=6000, compaction requested=false; wal=null 2024-11-10T10:32:01,665 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000003000 2024-11-10T10:32:01,665 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/0000000000000006000 2024-11-10T10:32:01,667 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 41704dd28349736b94466900f0c357bb 2024-11-10T10:32:01,667 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 41704dd28349736b94466900f0c357bb 2024-11-10T10:32:01,668 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-10T10:32:01,670 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 41704dd28349736b94466900f0c357bb 2024-11-10T10:32:01,673 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/test2727/41704dd28349736b94466900f0c357bb/recovered.edits/6000.seqid, newMaxSeqId=6000, maxSeqId=1 2024-11-10T10:32:01,674 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 41704dd28349736b94466900f0c357bb; next sequenceid=6001; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70250206, jitterRate=0.046809643507003784}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-10T10:32:01,676 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 41704dd28349736b94466900f0c357bb: Writing region info on filesystem at 1731234721329Initializing all the Stores at 1731234721330 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234721330Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234721330Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234721330Obtaining lock to block concurrent updates at 1731234721452 (+122 ms)Preparing flush snapshotting stores in 41704dd28349736b94466900f0c357bb at 1731234721452Finished memstore snapshotting test2727,,1731234720536.41704dd28349736b94466900f0c357bb., syncing WAL and waiting on mvcc, flushsize=dataSize=220680, getHeapSize=672720, 
getOffHeapSize=0, getCellsCount=6000 at 1731234721452Flushing stores of test2727,,1731234720536.41704dd28349736b94466900f0c357bb. at 1731234721452Flushing 41704dd28349736b94466900f0c357bb/a: creating writer at 1731234721452Flushing 41704dd28349736b94466900f0c357bb/a: appending metadata at 1731234721488 (+36 ms)Flushing 41704dd28349736b94466900f0c357bb/a: closing flushed file at 1731234721488Flushing 41704dd28349736b94466900f0c357bb/b: creating writer at 1731234721512 (+24 ms)Flushing 41704dd28349736b94466900f0c357bb/b: appending metadata at 1731234721539 (+27 ms)Flushing 41704dd28349736b94466900f0c357bb/b: closing flushed file at 1731234721539Flushing 41704dd28349736b94466900f0c357bb/c: creating writer at 1731234721562 (+23 ms)Flushing 41704dd28349736b94466900f0c357bb/c: appending metadata at 1731234721592 (+30 ms)Flushing 41704dd28349736b94466900f0c357bb/c: closing flushed file at 1731234721592Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7772fcc4: reopening flushed file at 1731234721631 (+39 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4218e9c1: reopening flushed file at 1731234721642 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3033f05e: reopening flushed file at 1731234721654 (+12 ms)Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 41704dd28349736b94466900f0c357bb in 211ms, sequenceid=6000, compaction requested=false; wal=null at 1731234721664 (+10 ms)Cleaning up temporary data from old regions at 1731234721667 (+3 ms)Region opened successfully at 1731234721675 (+8 ms) 2024-11-10T10:32:01,677 DEBUG [Time-limited test {}] wal.AbstractTestWALReplay(320): region.getOpenSeqNum(): 6001, wal3.id: 0 2024-11-10T10:32:01,677 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 41704dd28349736b94466900f0c357bb, disabling compactions & flushes 2024-11-10T10:32:01,677 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1731234720536.41704dd28349736b94466900f0c357bb. 2024-11-10T10:32:01,677 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1731234720536.41704dd28349736b94466900f0c357bb. 2024-11-10T10:32:01,677 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1731234720536.41704dd28349736b94466900f0c357bb. after waiting 0 ms 2024-11-10T10:32:01,677 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1731234720536.41704dd28349736b94466900f0c357bb. 2024-11-10T10:32:01,679 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1731234720536.41704dd28349736b94466900f0c357bb. 
2024-11-10T10:32:01,679 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 41704dd28349736b94466900f0c357bb: Waiting for close lock at 1731234721677Disabling compacts and flushes for region at 1731234721677Disabling writes for close at 1731234721677Writing region close event to WAL at 1731234721679 (+2 ms)Closed at 1731234721679 2024-11-10T10:32:01,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741870_1047 (size=95) 2024-11-10T10:32:01,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741870_1047 (size=95) 2024-11-10T10:32:01,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741870_1047 (size=95) 2024-11-10T10:32:01,688 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-10T10:32:01,688 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731234721305) 2024-11-10T10:32:01,706 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=402 (was 398) Potentially hanging thread: AsyncFSWAL-14-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:50088 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:43026 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for 
client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:54810 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-14-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=927 (was 865) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=218 (was 218), ProcessCount=11 (was 11), AvailableMemoryMB=8882 (was 9102) 2024-11-10T10:32:01,720 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=402, OpenFileDescriptor=927, MaxFileDescriptor=1048576, SystemLoadAverage=218, ProcessCount=11, AvailableMemoryMB=8881 2024-11-10T10:32:01,738 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:32:01,745 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:01,746 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor wal.1731234721746 2024-11-10T10:32:01,753 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746 2024-11-10T10:32:01,755 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new MockWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697)] 2024-11-10T10:32:01,757 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => b56d901ab17c9f32fd475aedce687421, NAME => 'testSequentialEditLogSeqNum,,1731234721738.b56d901ab17c9f32fd475aedce687421.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:01,757 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testSequentialEditLogSeqNum,,1731234721738.b56d901ab17c9f32fd475aedce687421.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:01,757 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for b56d901ab17c9f32fd475aedce687421 2024-11-10T10:32:01,757 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for b56d901ab17c9f32fd475aedce687421 2024-11-10T10:32:01,758 WARN [Time-limited test {}] regionserver.HRegionFileSystem(836): hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421 doesn't exist for region: b56d901ab17c9f32fd475aedce687421 on table testSequentialEditLogSeqNum 2024-11-10T10:32:01,759 WARN [Time-limited test {}] regionserver.HRegionFileSystem(854): .regioninfo file not found for region: b56d901ab17c9f32fd475aedce687421 on table testSequentialEditLogSeqNum 2024-11-10T10:32:01,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741875_1052 (size=62) 2024-11-10T10:32:01,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741875_1052 (size=62) 2024-11-10T10:32:01,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741875_1052 (size=62) 2024-11-10T10:32:01,771 INFO [StoreOpener-b56d901ab17c9f32fd475aedce687421-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family a of region b56d901ab17c9f32fd475aedce687421 2024-11-10T10:32:01,772 INFO [StoreOpener-b56d901ab17c9f32fd475aedce687421-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b56d901ab17c9f32fd475aedce687421 columnFamilyName a 2024-11-10T10:32:01,773 DEBUG [StoreOpener-b56d901ab17c9f32fd475aedce687421-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:01,773 INFO [StoreOpener-b56d901ab17c9f32fd475aedce687421-1 {}] regionserver.HStore(327): Store=b56d901ab17c9f32fd475aedce687421/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:01,773 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for b56d901ab17c9f32fd475aedce687421 2024-11-10T10:32:01,774 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421 2024-11-10T10:32:01,774 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421 2024-11-10T10:32:01,775 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for b56d901ab17c9f32fd475aedce687421 2024-11-10T10:32:01,775 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for b56d901ab17c9f32fd475aedce687421 2024-11-10T10:32:01,777 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for b56d901ab17c9f32fd475aedce687421 2024-11-10T10:32:01,780 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T10:32:01,780 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened b56d901ab17c9f32fd475aedce687421; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61161322, jitterRate=-0.08862528204917908}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T10:32:01,782 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for b56d901ab17c9f32fd475aedce687421: Writing region info on filesystem at 1731234721757Initializing all the Stores at 1731234721770 (+13 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234721771 (+1 ms)Cleaning up temporary data from old regions at 1731234721775 (+4 ms)Region opened successfully at 1731234721781 (+6 ms) 2024-11-10T10:32:01,796 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing b56d901ab17c9f32fd475aedce687421 1/1 column families, dataSize=770 B heapSize=1.73 KB 2024-11-10T10:32:01,820 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/.tmp/a/950f9c5eb9ca47c189487245b1b9ad6c is 81, key is testSequentialEditLogSeqNum/a:x0/1731234721782/Put/seqid=0 2024-11-10T10:32:01,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741876_1053 (size=5833) 2024-11-10T10:32:01,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741876_1053 (size=5833) 2024-11-10T10:32:01,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741876_1053 (size=5833) 2024-11-10T10:32:01,830 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=770 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/.tmp/a/950f9c5eb9ca47c189487245b1b9ad6c 2024-11-10T10:32:01,837 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/.tmp/a/950f9c5eb9ca47c189487245b1b9ad6c as hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/a/950f9c5eb9ca47c189487245b1b9ad6c 2024-11-10T10:32:01,843 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/a/950f9c5eb9ca47c189487245b1b9ad6c, entries=10, sequenceid=13, filesize=5.7 K 2024-11-10T10:32:01,844 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~770 B/770, heapSize ~1.72 KB/1760, currentSize=0 B/0 for b56d901ab17c9f32fd475aedce687421 in 49ms, sequenceid=13, compaction requested=false 2024-11-10T10:32:01,845 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for b56d901ab17c9f32fd475aedce687421: 2024-11-10T10:32:01,850 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T10:32:01,850 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T10:32:01,851 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T10:32:01,851 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T10:32:01,851 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-10T10:32:01,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741874_1051 (size=1844) 2024-11-10T10:32:01,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741874_1051 (size=1844) 2024-11-10T10:32:01,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741874_1051 (size=1844) 2024-11-10T10:32:01,872 INFO 
[Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746, size=1.8 K (1844bytes) 2024-11-10T10:32:01,872 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746 2024-11-10T10:32:01,872 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746 after 0ms 2024-11-10T10:32:01,875 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:32:01,876 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746 took 5ms 2024-11-10T10:32:01,878 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746 so closing down 2024-11-10T10:32:01,878 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-10T10:32:01,880 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731234721746.temp 2024-11-10T10:32:01,881 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/recovered.edits/0000000000000000003-wal.1731234721746.temp 2024-11-10T10:32:01,882 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-10T10:32:01,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741877_1054 (size=1477) 2024-11-10T10:32:01,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741877_1054 (size=1477) 2024-11-10T10:32:01,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741877_1054 (size=1477) 2024-11-10T10:32:01,892 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/recovered.edits/0000000000000000003-wal.1731234721746.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-11-10T10:32:01,893 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/recovered.edits/0000000000000000003-wal.1731234721746.temp to hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/recovered.edits/0000000000000000020 2024-11-10T10:32:01,894 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 17 edits across 1 Regions in 18 ms; skipped=2; 
WAL=hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746, size=1.8 K, length=1844, corrupted=false, cancelled=false 2024-11-10T10:32:01,894 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746, journal: Splitting hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746, size=1.8 K (1844bytes) at 1731234721872Finishing writing output for hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746 so closing down at 1731234721878 (+6 ms)Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/recovered.edits/0000000000000000003-wal.1731234721746.temp at 1731234721881 (+3 ms)3 split writer threads finished at 1731234721882 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/recovered.edits/0000000000000000003-wal.1731234721746.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1731234721892 (+10 ms)Rename recovered edits hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/recovered.edits/0000000000000000003-wal.1731234721746.temp to hdfs://localhost:39939/hbase/data/default/testSequentialEditLogSeqNum/b56d901ab17c9f32fd475aedce687421/recovered.edits/0000000000000000020 at 1731234721893 (+1 ms)Processed 17 edits across 1 Regions in 18 ms; skipped=2; WAL=hdfs://localhost:39939/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731234721737/wal.1731234721746, size=1.8 K, length=1844, corrupted=false, cancelled=false at 1731234721894 (+1 ms) 2024-11-10T10:32:01,912 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=407 (was 402) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:50088 [Waiting for operation #11]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: sync.0
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)

Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:43026 [Waiting for operation #9]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: sync.1
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)

Potentially hanging thread: sync.2
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
    app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)

 - Thread LEAK? -, OpenFileDescriptor=963 (was 927) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=218 (was 218), ProcessCount=11 (was 11), AvailableMemoryMB=8860 (was 8881)

2024-11-10T10:32:01,926 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=407, OpenFileDescriptor=963, MaxFileDescriptor=1048576, SystemLoadAverage=218, ProcessCount=11, AvailableMemoryMB=8859
2024-11-10T10:32:01,952 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-10T10:32:01,956 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-10T10:32:01,985 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-10T10:32:01,989 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-89397277, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/hregion-89397277, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
2024-11-10T10:32:02,013 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/hregion-89397277/hregion-89397277.1731234721990, exclude list is [], retry=0
2024-11-10T10:32:02,017 DEBUG [AsyncFSWAL-17-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:32:02,017 DEBUG [AsyncFSWAL-17-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:32:02,018 DEBUG [AsyncFSWAL-17-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:32:02,020 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-89397277/hregion-89397277.1731234721990
2024-11-10T10:32:02,021 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657)]
2024-11-10T10:32:02,021 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => cda9c0264d4c296c231154f141bdeb5e, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testRegionMadeOfBulkLoadedFilesOnly', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39939/hbase
2024-11-10T10:32:02,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741879_1056 (size=70)
2024-11-10T10:32:02,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741879_1056 (size=70)
2024-11-10T10:32:02,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741879_1056 (size=70)
2024-11-10T10:32:02,069 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:32:02,070 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,072 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cda9c0264d4c296c231154f141bdeb5e columnFamilyName a
2024-11-10T10:32:02,072 DEBUG [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:02,073 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(327): Store=cda9c0264d4c296c231154f141bdeb5e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:02,073 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,074 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cda9c0264d4c296c231154f141bdeb5e columnFamilyName b
2024-11-10T10:32:02,074 DEBUG [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:02,075 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(327): Store=cda9c0264d4c296c231154f141bdeb5e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:02,075 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,076 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cda9c0264d4c296c231154f141bdeb5e columnFamilyName c
2024-11-10T10:32:02,076 DEBUG [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:02,077 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(327): Store=cda9c0264d4c296c231154f141bdeb5e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:02,077 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,078 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,078 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,079 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,079 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,080 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead.
2024-11-10T10:32:02,081 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,083 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-10T10:32:02,083 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened cda9c0264d4c296c231154f141bdeb5e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72895391, jitterRate=0.0862259715795517}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242}
2024-11-10T10:32:02,084 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for cda9c0264d4c296c231154f141bdeb5e: Writing region info on filesystem at 1731234722069Initializing all the Stores at 1731234722070 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234722070Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234722070Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234722070Cleaning up temporary data from old regions at 1731234722079 (+9 ms)Region opened successfully at 1731234722084 (+5 ms)
2024-11-10T10:32:02,084 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing cda9c0264d4c296c231154f141bdeb5e, disabling compactions & flushes
2024-11-10T10:32:02,084 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.
2024-11-10T10:32:02,085 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.
2024-11-10T10:32:02,085 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e. after waiting 0 ms
2024-11-10T10:32:02,085 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.
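[Annotation] The records above show the first create/open cycle for the region under test. The HRegion(7572) "creating" record prints the full table schema: three column families 'a', 'b', 'c', each with VERSIONS => '1', BLOOMFILTER => 'ROW', and a 64 KB block size. The test itself drives HRegion directly; as a minimal sketch (not the test's code), an equivalent descriptor can be built with the public client API:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class DescriptorSketch {
      // Mirrors the schema printed in the HRegion(7572) "creating" record above.
      public static TableDescriptor build() {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly"));
        for (String family : new String[] { "a", "b", "c" }) {
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
              .build());
        }
        return table.build();
      }
    }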
2024-11-10T10:32:02,085 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.
2024-11-10T10:32:02,085 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for cda9c0264d4c296c231154f141bdeb5e: Waiting for close lock at 1731234722084Disabling compacts and flushes for region at 1731234722084Disabling writes for close at 1731234722085 (+1 ms)Writing region close event to WAL at 1731234722085Closed at 1731234722085
2024-11-10T10:32:02,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741878_1055 (size=95)
2024-11-10T10:32:02,089 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-89397277/hregion-89397277.1731234721990 not finished, retry = 0
2024-11-10T10:32:02,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741878_1055 (size=95)
2024-11-10T10:32:02,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741878_1055 (size=95)
2024-11-10T10:32:02,192 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs
2024-11-10T10:32:02,192 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-89397277:(num 1731234721990)
2024-11-10T10:32:02,193 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor
2024-11-10T10:32:02,195 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
2024-11-10T10:32:02,214 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196, exclude list is [], retry=0
2024-11-10T10:32:02,218 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:32:02,218 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:32:02,219 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:32:02,221 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196
2024-11-10T10:32:02,221 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983)]
2024-11-10T10:32:02,222 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => cda9c0264d4c296c231154f141bdeb5e, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.', STARTKEY => '', ENDKEY => ''}
2024-11-10T10:32:02,222 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:32:02,222 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,222 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,224 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,225 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cda9c0264d4c296c231154f141bdeb5e columnFamilyName a
2024-11-10T10:32:02,225 DEBUG [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:02,226 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(327): Store=cda9c0264d4c296c231154f141bdeb5e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:02,226 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,227 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cda9c0264d4c296c231154f141bdeb5e columnFamilyName b
2024-11-10T10:32:02,227 DEBUG [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:02,227 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(327): Store=cda9c0264d4c296c231154f141bdeb5e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:02,227 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,228 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cda9c0264d4c296c231154f141bdeb5e columnFamilyName c
2024-11-10T10:32:02,228 DEBUG [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:02,229 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(327): Store=cda9c0264d4c296c231154f141bdeb5e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:02,229 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,230 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,231 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,232 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,232 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,233 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead.
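[Annotation] The FlushLargeStoresPolicy(65) record above is the fallback path: since hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, the per-family flush lower bound becomes the memstore flush size divided by the number of families, here 128 MB / 3 = 44739242 bytes, which is both the "42.7 M" printed above and the flushSizeLowerBound=44739242 printed at region open. A hedged sketch of pinning the bound explicitly instead (imports as in the earlier sketch; the 16 MB value is illustrative only, not from this test):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class FlushBoundSketch {
      public static TableDescriptor withExplicitLowerBound() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly"))
            // Property name taken from the log record above; value is an assumed example.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
      }
    }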
2024-11-10T10:32:02,234 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:02,235 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened cda9c0264d4c296c231154f141bdeb5e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68961717, jitterRate=0.02760966122150421}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242}
2024-11-10T10:32:02,236 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for cda9c0264d4c296c231154f141bdeb5e: Writing region info on filesystem at 1731234722222Initializing all the Stores at 1731234722223 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234722223Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234722224 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234722224Cleaning up temporary data from old regions at 1731234722232 (+8 ms)Region opened successfully at 1731234722236 (+4 ms)
2024-11-10T10:32:02,240 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile is 28, key is \x0D/a:a/1731234722239/Put/seqid=0
2024-11-10T10:32:02,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741881_1058 (size=4826)
2024-11-10T10:32:02,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741881_1058 (size=4826)
2024-11-10T10:32:02,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741881_1058 (size=4826)
2024-11-10T10:32:02,252 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:39939/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile for inclusion in cda9c0264d4c296c231154f141bdeb5e/a
2024-11-10T10:32:02,262 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first= last=z
2024-11-10T10:32:02,262 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last=
2024-11-10T10:32:02,262 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for cda9c0264d4c296c231154f141bdeb5e:
2024-11-10T10:32:02,264 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile as hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/a/7d10f94d1e9b444aa5b896f438b0da76_SeqId_3_
2024-11-10T10:32:02,265 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:39939/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into cda9c0264d4c296c231154f141bdeb5e/a as hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/a/7d10f94d1e9b444aa5b896f438b0da76_SeqId_3_ - updating store file list.
2024-11-10T10:32:02,270 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 7d10f94d1e9b444aa5b896f438b0da76_SeqId_3_: NONE, but ROW specified in column family configuration
2024-11-10T10:32:02,270 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/a/7d10f94d1e9b444aa5b896f438b0da76_SeqId_3_ into cda9c0264d4c296c231154f141bdeb5e/a
2024-11-10T10:32:02,271 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:39939/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into cda9c0264d4c296c231154f141bdeb5e/a (new location: hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/a/7d10f94d1e9b444aa5b896f438b0da76_SeqId_3_)
2024-11-10T10:32:02,317 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196, size=0 (0bytes)
2024-11-10T10:32:02,317 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196 might be still open, length is 0
2024-11-10T10:32:02,317 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196
2024-11-10T10:32:02,317 WARN [IPC Server handler 0 on default port 39939 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196 has not been closed. Lease recovery is in progress. RecoveryId = 1059 for block blk_1073741880_1057
2024-11-10T10:32:02,318 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196 after 1ms
2024-11-10T10:32:04,679 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:54890 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:43217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54890 dst: /127.0.0.1:43217
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:43217 remote=/127.0.0.1:54890]. Total timeout mills is 60000, 57599 millis timeout left.
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:32:04,679 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:43114 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:44251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43114 dst: /127.0.0.1:44251
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
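[Annotation] This sequence is normal when splitting a WAL whose writer never closed it: RecoverLeaseFSUtils asks the NameNode to recover the lease (attempt=0 fails after 1ms because recovery has only just been scheduled), and the DataXceiver WRITE_BLOCK errors here and just below are the expected teardown of the in-flight write pipeline on each datanode. At the HDFS client level the recovery loop reduces to roughly the following sketch; the fixed 4-second retry interval matches the attempt spacing observed in this log, not a documented constant, and RecoverLeaseFSUtils has its own backoff logic.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      // Poll recoverLease until the NameNode reports the file closed.
      public static void recover(DistributedFileSystem dfs, Path wal) throws Exception {
        boolean closed = dfs.recoverLease(wal); // triggers block recovery if needed
        while (!closed) {
          Thread.sleep(4000L);                  // assumed retry pacing (~4 s here)
          closed = dfs.recoverLease(wal);       // true once the file is closed
        }
      }
    }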
2024-11-10T10:32:04,679 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:50202 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50202 dst: /127.0.0.1:44741
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:32:04,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741880_1059 (size=473)
2024-11-10T10:32:04,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741880_1059 (size=473)
2024-11-10T10:32:06,318 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196 after 4001ms
2024-11-10T10:32:06,322 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:32:06,322 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196 took 4005ms
2024-11-10T10:32:06,324 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196; continuing.
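[Annotation] The AbstractProtobufWALReader(321) record above is the point of this test suite: the WAL is read back with tag compression and GZ value compression enabled (hasValueCompression=true, valueCompressionType=GZ). A hedged configuration sketch; the property names below come from the WAL value compression feature (HBASE-25869) and should be treated as assumptions to verify against the HBase version in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalValueCompressionSketch {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names (HBASE-25869); "GZ" matches valueCompressionType=GZ above.
        conf.setBoolean("hbase.regionserver.wal.value.enablecompression", true);
        conf.set("hbase.regionserver.wal.value.compression.type", "GZ");
        return conf;
      }
    }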
2024-11-10T10:32:06,324 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196 so closing down 2024-11-10T10:32:06,324 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-10T10:32:06,326 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000005-wal.1731234722196.temp 2024-11-10T10:32:06,327 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005-wal.1731234722196.temp 2024-11-10T10:32:06,328 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-10T10:32:06,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741882_1060 (size=259) 2024-11-10T10:32:06,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741882_1060 (size=259) 2024-11-10T10:32:06,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741882_1060 (size=259) 2024-11-10T10:32:06,339 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005-wal.1731234722196.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-11-10T10:32:06,341 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005-wal.1731234722196.temp to hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005 2024-11-10T10:32:06,341 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 18 ms; skipped=1; WAL=hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196, size=0, length=0, corrupted=false, cancelled=false 2024-11-10T10:32:06,341 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196, journal: Splitting hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196, size=0 (0bytes) at 1731234722317Finishing writing output for hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196 so closing down at 1731234726324 (+4007 ms)Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005-wal.1731234722196.temp at 1731234726327 (+3 ms)3 split writer threads finished at 1731234726328 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005-wal.1731234722196.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1731234726339 (+11 ms)Rename recovered edits hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005-wal.1731234722196.temp to hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005 at 1731234726341 (+2 ms)Processed 2 edits across 1 Regions in 18 ms; skipped=1; WAL=hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196, size=0, length=0, corrupted=false, cancelled=false at 1731234726341 2024-11-10T10:32:06,343 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234722196 to hdfs://localhost:39939/hbase/oldWALs/wal.1731234722196 2024-11-10T10:32:06,344 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005 2024-11-10T10:32:06,344 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-10T10:32:06,345 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:06,359 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234726346, exclude list is [], retry=0 2024-11-10T10:32:06,361 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:06,362 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:06,362 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:06,364 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731234721951/wal.1731234726346 2024-11-10T10:32:06,364 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697)] 2024-11-10T10:32:06,364 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => cda9c0264d4c296c231154f141bdeb5e, NAME => 
'testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:06,365 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:06,365 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for cda9c0264d4c296c231154f141bdeb5e 2024-11-10T10:32:06,365 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for cda9c0264d4c296c231154f141bdeb5e 2024-11-10T10:32:06,366 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region cda9c0264d4c296c231154f141bdeb5e 2024-11-10T10:32:06,367 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cda9c0264d4c296c231154f141bdeb5e columnFamilyName a 2024-11-10T10:32:06,367 DEBUG [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:06,374 DEBUG [StoreFileOpener-cda9c0264d4c296c231154f141bdeb5e-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 7d10f94d1e9b444aa5b896f438b0da76_SeqId_3_: NONE, but ROW specified in column family configuration 2024-11-10T10:32:06,374 DEBUG [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/a/7d10f94d1e9b444aa5b896f438b0da76_SeqId_3_ 2024-11-10T10:32:06,374 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(327): Store=cda9c0264d4c296c231154f141bdeb5e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:06,374 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region cda9c0264d4c296c231154f141bdeb5e 2024-11-10T10:32:06,375 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cda9c0264d4c296c231154f141bdeb5e columnFamilyName b 2024-11-10T10:32:06,375 DEBUG [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:06,376 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(327): Store=cda9c0264d4c296c231154f141bdeb5e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:06,376 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region cda9c0264d4c296c231154f141bdeb5e 2024-11-10T10:32:06,377 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cda9c0264d4c296c231154f141bdeb5e columnFamilyName c 2024-11-10T10:32:06,377 DEBUG [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:06,377 INFO [StoreOpener-cda9c0264d4c296c231154f141bdeb5e-1 {}] regionserver.HStore(327): Store=cda9c0264d4c296c231154f141bdeb5e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:06,377 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for cda9c0264d4c296c231154f141bdeb5e 2024-11-10T10:32:06,378 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e 2024-11-10T10:32:06,379 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e 2024-11-10T10:32:06,380 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from 
hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005
2024-11-10T10:32:06,382 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:32:06,383 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 1, skipped 0, firstSequenceIdInLog=5, maxSequenceIdInLog=5, path=hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005
2024-11-10T10:32:06,383 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing cda9c0264d4c296c231154f141bdeb5e 3/3 column families, dataSize=58 B heapSize=904 B
2024-11-10T10:32:06,398 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/.tmp/a/32dcca508e8c465abad234e64621fdfb is 62, key is testRegionMadeOfBulkLoadedFilesOnly/a:a/1731234722276/Put/seqid=0
2024-11-10T10:32:06,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741884_1062 (size=5149)
2024-11-10T10:32:06,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741884_1062 (size=5149)
2024-11-10T10:32:06,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741884_1062 (size=5149)
2024-11-10T10:32:06,405 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/.tmp/a/32dcca508e8c465abad234e64621fdfb
2024-11-10T10:32:06,410 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/.tmp/a/32dcca508e8c465abad234e64621fdfb as hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/a/32dcca508e8c465abad234e64621fdfb
2024-11-10T10:32:06,416 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/a/32dcca508e8c465abad234e64621fdfb, entries=1, sequenceid=5, filesize=5.0 K
2024-11-10T10:32:06,416 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for cda9c0264d4c296c231154f141bdeb5e in 33ms, sequenceid=5, compaction requested=false; wal=null
2024-11-10T10:32:06,417 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/0000000000000000005
2024-11-10T10:32:06,418 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:06,418 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:06,419 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor; using region.getMemStoreFlushHeapSize/# of families (42.7 M) instead.
2024-11-10T10:32:06,421 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for cda9c0264d4c296c231154f141bdeb5e
2024-11-10T10:32:06,423 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/cda9c0264d4c296c231154f141bdeb5e/recovered.edits/5.seqid, newMaxSeqId=5, maxSeqId=1
2024-11-10T10:32:06,423 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened cda9c0264d4c296c231154f141bdeb5e; next sequenceid=6; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70308041, jitterRate=0.04767145216464996}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242}
2024-11-10T10:32:06,424 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for cda9c0264d4c296c231154f141bdeb5e: Writing region info on filesystem at 1731234726365Initializing all the Stores at 1731234726366 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234726366Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234726366Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234726366Obtaining lock to block concurrent updates at 1731234726383 (+17 ms)Preparing flush snapshotting stores in cda9c0264d4c296c231154f141bdeb5e at 1731234726383Finished memstore snapshotting testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e., syncing WAL and waiting on mvcc, flushsize=dataSize=58, getHeapSize=856, getOffHeapSize=0, getCellsCount=1 at 1731234726383Flushing stores of testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e. at 1731234726383Flushing cda9c0264d4c296c231154f141bdeb5e/a: creating writer at 1731234726383Flushing cda9c0264d4c296c231154f141bdeb5e/a: appending metadata at 1731234726398 (+15 ms)Flushing cda9c0264d4c296c231154f141bdeb5e/a: closing flushed file at 1731234726398Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@80c063b: reopening flushed file at 1731234726410 (+12 ms)Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for cda9c0264d4c296c231154f141bdeb5e in 33ms, sequenceid=5, compaction requested=false; wal=null at 1731234726416 (+6 ms)Cleaning up temporary data from old regions at 1731234726418 (+2 ms)Region opened successfully at 1731234726424 (+6 ms)
2024-11-10T10:32:06,428 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing cda9c0264d4c296c231154f141bdeb5e, disabling compactions & flushes
2024-11-10T10:32:06,428 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.
2024-11-10T10:32:06,428 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.
2024-11-10T10:32:06,428 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e. after waiting 0 ms
2024-11-10T10:32:06,428 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.
2024-11-10T10:32:06,429 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1731234721953.cda9c0264d4c296c231154f141bdeb5e.
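For reference, the flags the reader reports above (hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ) reflect WAL compression settings on the cluster under test. A minimal sketch of how such a configuration might be built, assuming the configuration keys I recall from WAL compression and HBASE-25869 value compression; the exact key names and accepted spellings (e.g. "GZ" vs "gz") should be checked against the version's documentation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalValueCompressionExample {
      // Returns a configuration with WAL compression switched on.
      public static Configuration walCompressionConf() {
        Configuration conf = HBaseConfiguration.create();
        // Dictionary compression of WAL entry keys.
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);
        // Compression of cell values inside WAL entries (assumed keys from
        // HBASE-25869); "GZ" matches valueCompressionType=GZ logged above.
        conf.setBoolean("hbase.regionserver.wal.value.compression.enabled", true);
        conf.set("hbase.regionserver.wal.value.compression.type", "GZ");
        return conf;
      }
    }
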
2024-11-10T10:32:06,429 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for cda9c0264d4c296c231154f141bdeb5e: Waiting for close lock at 1731234726428Disabling compacts and flushes for region at 1731234726428Disabling writes for close at 1731234726428Writing region close event to WAL at 1731234726429 (+1 ms)Closed at 1731234726429
2024-11-10T10:32:06,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741883_1061 (size=95)
2024-11-10T10:32:06,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741883_1061 (size=95)
2024-11-10T10:32:06,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741883_1061 (size=95)
2024-11-10T10:32:06,434 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs
2024-11-10T10:32:06,434 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731234726346)
2024-11-10T10:32:06,451 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=411 (was 407)
Potentially hanging thread: LeaseRenewer:jenkinstestRegionMadeOfBulkLoadedFilesOnly@localhost:39939
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: AsyncFSWAL-17-2
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111575503_22 at /127.0.0.1:43140 [Waiting for operation #9]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111575503_22 at /127.0.0.1:50218 [Waiting for operation #3]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:39939 from jenkinstestRegionMadeOfBulkLoadedFilesOnly
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: AsyncFSWAL-17-3
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2111575503_22 at /127.0.0.1:54926 [Waiting for operation #5]
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263)
    java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334)
    java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312)
    app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: AsyncFSWAL-17-1
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
- Thread LEAK? -, OpenFileDescriptor=1023 (was 963) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=216 (was 218), ProcessCount=11 (was 11), AvailableMemoryMB=8799 (was 8859)
2024-11-10T10:32:06,463 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=411, OpenFileDescriptor=1023, MaxFileDescriptor=1048576, SystemLoadAverage=216, ProcessCount=11, AvailableMemoryMB=8798
2024-11-10T10:32:06,478 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-10T10:32:06,483 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry
2024-11-10T10:32:06,487 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is eccb6f743e07,38215,1731234710743
2024-11-10T10:32:06,489 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4d23b236
2024-11-10T10:32:06,490 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false
2024-11-10T10:32:06,492 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58796, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService
2024-11-10T10:32:06,497 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-10T10:32:06,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF
2024-11-10T10:32:06,507 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_PRE_OPERATION
2024-11-10T10:32:06,509 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testReplayEditsAfterRegionMovedWithMultiCF" procId is: 4
2024-11-10T10:32:06,509 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:06,511 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-10T10:32:06,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-10T10:32:06,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741885_1063 (size=694)
2024-11-10T10:32:06,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741885_1063 (size=694)
2024-11-10T10:32:06,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741885_1063 (size=694)
2024-11-10T10:32:06,530 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 298cceebf7d48f429a5ffead936f01b2, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf
2024-11-10T10:32:06,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741886_1064 (size=77)
2024-11-10T10:32:06,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741886_1064 (size=77)
2024-11-10T10:32:06,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741886_1064 (size=77)
2024-11-10T10:32:06,544 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:32:06,544 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1722): Closing 298cceebf7d48f429a5ffead936f01b2, disabling compactions & flushes
2024-11-10T10:32:06,544 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
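As an aside, the table descriptor echoed in the create request above (families cf1 and cf2, VERSIONS => '1', BLOOMFILTER => 'NONE', REGION_REPLICATION => '1') is what the standard client API produces from roughly the following. A hedged sketch, assuming an already-open Connection named conn; class and method names beyond that are mine:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMultiCfTableExample {
      public static void createTable(Connection conn) throws IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))
            .setRegionReplication(1) // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf1")).setMaxVersions(1).build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf2")).setMaxVersions(1).build())
            .build();
        try (Admin admin = conn.getAdmin()) {
          // Blocks until the master's CreateTableProcedure (pid=4 here) completes.
          admin.createTable(td);
        }
      }
    }
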
2024-11-10T10:32:06,544 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:06,544 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. after waiting 0 ms
2024-11-10T10:32:06,544 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:06,544 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:06,544 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1676): Region close journal for 298cceebf7d48f429a5ffead936f01b2: Waiting for close lock at 1731234726544Disabling compacts and flushes for region at 1731234726544Disabling writes for close at 1731234726544Writing region close event to WAL at 1731234726544Closed at 1731234726544
2024-11-10T10:32:06,546 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ADD_TO_META
2024-11-10T10:32:06,551 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.","families":{"info":[{"qualifier":"regioninfo","vlen":76,"tag":[],"timestamp":"1731234726546"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731234726546"}]},"ts":"1731234726546"}
2024-11-10T10:32:06,555 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta.
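The Put just logged writes the region's info:regioninfo and info:state cells into the hbase:meta catalog table. A hedged sketch of reading that row back through the ordinary client API (assuming an open Connection conn; the row key is the full region name exactly as it appears in the log):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadMetaRowExample {
      public static RegionInfo readRegionInfo(Connection conn, String regionName)
          throws IOException {
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Result r = meta.get(new Get(Bytes.toBytes(regionName))
              .addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER)
              .addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("state")));
          // info:regioninfo holds a serialized RegionInfo (the vlen=76 cell above).
          return RegionInfo.parseFromOrNull(
              r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
        }
      }
    }
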
2024-11-10T10:32:06,557 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ASSIGN_REGIONS
2024-11-10T10:32:06,560 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731234726557"}]},"ts":"1731234726557"}
2024-11-10T10:32:06,564 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLING in hbase:meta
2024-11-10T10:32:06,565 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {eccb6f743e07=0} racks are {/default-rack=0}
2024-11-10T10:32:06,566 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions
2024-11-10T10:32:06,566 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions
2024-11-10T10:32:06,566 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions
2024-11-10T10:32:06,566 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0
2024-11-10T10:32:06,566 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0
2024-11-10T10:32:06,566 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0
2024-11-10T10:32:06,566 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0
2024-11-10T10:32:06,566 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0
2024-11-10T10:32:06,566 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0
2024-11-10T10:32:06,566 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1
2024-11-10T10:32:06,567 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, ASSIGN}]
2024-11-10T10:32:06,569 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, ASSIGN
2024-11-10T10:32:06,571 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, ASSIGN; state=OFFLINE, location=eccb6f743e07,39345,1731234711428; forceNewPlan=false, retain=false
2024-11-10T10:32:06,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-10T10:32:06,723 INFO [eccb6f743e07:38215 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment.
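The repeated "Checking to see if procedure is done pid=4" entries are the client polling the master while the create-table procedure and its ASSIGN subprocedure run. A hedged sketch of the non-blocking client-side pattern behind this, assuming the single-argument createTableAsync overload available in recent client versions:

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class AsyncCreateExample {
      public static void createAndWait(Admin admin, TableDescriptor td) throws Exception {
        // The master stores the CreateTableProcedure (pid=4 above) and returns;
        // the Future completes once the procedure, including the ASSIGN
        // subprocedures (pid=5/6 here), has finished.
        Future<Void> created = admin.createTableAsync(td);
        created.get(60, TimeUnit.SECONDS);
      }
    }
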
2024-11-10T10:32:06,724 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=OPENING, regionLocation=eccb6f743e07,39345,1731234711428
2024-11-10T10:32:06,727 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, ASSIGN because future has completed
2024-11-10T10:32:06,728 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,39345,1731234711428}]
2024-11-10T10:32:06,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-10T10:32:06,881 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-10T10:32:06,883 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48527, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-10T10:32:06,888 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:06,888 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 298cceebf7d48f429a5ffead936f01b2, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.', STARTKEY => '', ENDKEY => ''}
2024-11-10T10:32:06,889 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,889 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:32:06,889 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,889 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,890 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,892 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf1
2024-11-10T10:32:06,892 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:06,892 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:06,892 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,894 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf2
2024-11-10T10:32:06,894 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:06,894 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:06,894 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,895 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,895 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,896 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,896 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,897 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor; using region.getMemStoreFlushHeapSize/# of families (64.0 M) instead.
2024-11-10T10:32:06,898 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,900 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-10T10:32:06,901 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 298cceebf7d48f429a5ffead936f01b2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70688825, jitterRate=0.05334557592868805}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864}
2024-11-10T10:32:06,901 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:06,901 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 298cceebf7d48f429a5ffead936f01b2: Running coprocessor pre-open hook at 1731234726889Writing region info on filesystem at 1731234726889Initializing all the Stores at 1731234726890 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234726890Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234726890Cleaning up temporary data from old regions at 1731234726896 (+6 ms)Running coprocessor post-open hooks at 1731234726901 (+5 ms)Region opened successfully at 1731234726901
2024-11-10T10:32:06,903 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., pid=6, masterSystemTime=1731234726880
2024-11-10T10:32:06,906 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:06,906 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:06,907 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=OPEN, openSeqNum=2, regionLocation=eccb6f743e07,39345,1731234711428
2024-11-10T10:32:06,910 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,39345,1731234711428 because future has completed
2024-11-10T10:32:06,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5
2024-11-10T10:32:06,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,39345,1731234711428 in 183 msec
2024-11-10T10:32:06,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4
2024-11-10T10:32:06,917 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, ASSIGN in 347 msec
2024-11-10T10:32:06,919 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_UPDATE_DESC_CACHE
2024-11-10T10:32:06,919 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731234726919"}]},"ts":"1731234726919"}
2024-11-10T10:32:06,921 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLED in hbase:meta
2024-11-10T10:32:06,922 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_POST_OPERATION
2024-11-10T10:32:06,924 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF in 423 msec
2024-11-10T10:32:07,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-10T10:32:07,146 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testReplayEditsAfterRegionMovedWithMultiCF completed
2024-11-10T10:32:07,146 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testReplayEditsAfterRegionMovedWithMultiCF get assigned. Timeout = 60000ms
2024-11-10T10:32:07,148 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-10T10:32:07,153 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned to meta. Checking AM states.
2024-11-10T10:32:07,153 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1])
2024-11-10T10:32:07,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned.
2024-11-10T10:32:07,169 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=2]
2024-11-10T10:32:07,170 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-11-10T10:32:07,172 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48856, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-11-10T10:32:07,185 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=298cceebf7d48f429a5ffead936f01b2, source=eccb6f743e07,39345,1731234711428, destination=eccb6f743e07,44191,1731234711534, warming up region on eccb6f743e07,44191,1731234711534
2024-11-10T10:32:07,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-10T10:32:07,187 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=298cceebf7d48f429a5ffead936f01b2, source=eccb6f743e07,39345,1731234711428, destination=eccb6f743e07,44191,1731234711534, running balancer
2024-11-10T10:32:07,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE
2024-11-10T10:32:07,189 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE
2024-11-10T10:32:07,189 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45195, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-10T10:32:07,191 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=CLOSING, regionLocation=eccb6f743e07,39345,1731234711428
2024-11-10T10:32:07,193 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44191 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:07,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44191 {}] regionserver.HRegion(7855): Warmup {ENCODED => 298cceebf7d48f429a5ffead936f01b2, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.', STARTKEY => '', ENDKEY => ''}
2024-11-10T10:32:07,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44191 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:32:07,194 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,195 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE because future has completed
2024-11-10T10:32:07,195 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-11-10T10:32:07,195 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf1
2024-11-10T10:32:07,195 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:07,195 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,39345,1731234711428}]
2024-11-10T10:32:07,196 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:07,196 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,197 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf2
2024-11-10T10:32:07,197 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:07,198 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:07,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44191 {}] regionserver.HRegion(1722): Closing 298cceebf7d48f429a5ffead936f01b2, disabling compactions & flushes
2024-11-10T10:32:07,198 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44191 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:07,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44191 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:07,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44191 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. after waiting 0 ms
2024-11-10T10:32:07,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44191 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:07,206 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44191 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
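The move traced above (warmup on the destination, then a CloseRegionProcedure on the source) is driven by a single admin call. A hedged sketch, assuming an Admin handle and the ServerName overload of move available in recent client versions; the region and server strings are copied from the log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MoveRegionExample {
      public static void moveRegion(Admin admin) throws IOException {
        // Encoded region name and destination server as logged; the master
        // responds by running TransitRegionStateProcedure REOPEN/MOVE (pid=7).
        admin.move(Bytes.toBytes("298cceebf7d48f429a5ffead936f01b2"),
            ServerName.valueOf("eccb6f743e07,44191,1731234711534"));
      }
    }
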
2024-11-10T10:32:07,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44191 {}] regionserver.HRegion(1676): Region close journal for 298cceebf7d48f429a5ffead936f01b2: Waiting for close lock at 1731234727198Disabling compacts and flushes for region at 1731234727198Disabling writes for close at 1731234727198Writing region close event to WAL at 1731234727206 (+8 ms)Closed at 1731234727206
2024-11-10T10:32:07,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] procedure.ProcedureSyncWait(219): waitFor pid=7
2024-11-10T10:32:07,354 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(122): Close 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,354 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false
2024-11-10T10:32:07,355 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1722): Closing 298cceebf7d48f429a5ffead936f01b2, disabling compactions & flushes
2024-11-10T10:32:07,355 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:07,355 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:07,355 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. after waiting 0 ms
2024-11-10T10:32:07,355 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
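The 31 B the close-time flush is about to persist (next entries; the flushed cell's key is r1/cf1:q) is the single cell the test client wrote earlier, at the row='r1' locate above. A hedged sketch of that write, assuming an open Connection conn; the value bytes are a placeholder, since the log does not show them:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutSingleCellExample {
      public static void putCell(Connection conn) throws IOException {
        try (Table table = conn.getTable(
            TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
          // Row r1, family cf1, qualifier q: the cell key the flush below
          // reports as "key is r1/cf1:q". The value "v" is assumed.
          table.put(new Put(Bytes.toBytes("r1"))
              .addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q"), Bytes.toBytes("v")));
        }
      }
    }
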
2024-11-10T10:32:07,355 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(2902): Flushing 298cceebf7d48f429a5ffead936f01b2 2/2 column families, dataSize=31 B heapSize=616 B
2024-11-10T10:32:07,374 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp/cf1/ac22617072d04227aa42d29ea43abcc2 is 35, key is r1/cf1:q/1731234727173/Put/seqid=0
2024-11-10T10:32:07,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741887_1065 (size=4783)
2024-11-10T10:32:07,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741887_1065 (size=4783)
2024-11-10T10:32:07,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741887_1065 (size=4783)
2024-11-10T10:32:07,381 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp/cf1/ac22617072d04227aa42d29ea43abcc2
2024-11-10T10:32:07,388 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp/cf1/ac22617072d04227aa42d29ea43abcc2 as hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/ac22617072d04227aa42d29ea43abcc2
2024-11-10T10:32:07,394 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/ac22617072d04227aa42d29ea43abcc2, entries=1, sequenceid=5, filesize=4.7 K
2024-11-10T10:32:07,395 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 298cceebf7d48f429a5ffead936f01b2 in 40ms, sequenceid=5, compaction requested=false
2024-11-10T10:32:07,395 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testReplayEditsAfterRegionMovedWithMultiCF'
2024-11-10T10:32:07,400 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-11-10T10:32:07,403 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:07,403 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1676): Region close journal for 298cceebf7d48f429a5ffead936f01b2: Waiting for close lock at 1731234727355Running coprocessor pre-close hooks at 1731234727355Disabling compacts and flushes for region at 1731234727355Disabling writes for close at 1731234727355Obtaining lock to block concurrent updates at 1731234727355Preparing flush snapshotting stores in 298cceebf7d48f429a5ffead936f01b2 at 1731234727355Finished memstore snapshotting testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., syncing WAL and waiting on mvcc, flushsize=dataSize=31, getHeapSize=584, getOffHeapSize=0, getCellsCount=1 at 1731234727355Flushing stores of testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. at 1731234727356 (+1 ms)Flushing 298cceebf7d48f429a5ffead936f01b2/cf1: creating writer at 1731234727356Flushing 298cceebf7d48f429a5ffead936f01b2/cf1: appending metadata at 1731234727373 (+17 ms)Flushing 298cceebf7d48f429a5ffead936f01b2/cf1: closing flushed file at 1731234727374 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@287fd134: reopening flushed file at 1731234727388 (+14 ms)Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 298cceebf7d48f429a5ffead936f01b2 in 40ms, sequenceid=5, compaction requested=false at 1731234727395 (+7 ms)Writing region close event to WAL at 1731234727397 (+2 ms)Running coprocessor post-close hooks at 1731234727401 (+4 ms)Closed at 1731234727403 (+2 ms)
2024-11-10T10:32:07,404 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionServer(3302): Adding 298cceebf7d48f429a5ffead936f01b2 move to eccb6f743e07,44191,1731234711534 record at close sequenceid=5
2024-11-10T10:32:07,407 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(157): Closed 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,408 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=CLOSED
2024-11-10T10:32:07,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,39345,1731234711428 because future has completed
2024-11-10T10:32:07,415 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-10T10:32:07,415 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; CloseRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,39345,1731234711428 in 217 msec
2024-11-10T10:32:07,417 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE; state=CLOSED, location=eccb6f743e07,44191,1731234711534; forceNewPlan=false, retain=false
2024-11-10T10:32:07,567 INFO [eccb6f743e07:38215 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment.
2024-11-10T10:32:07,567 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=OPENING, regionLocation=eccb6f743e07,44191,1731234711534
2024-11-10T10:32:07,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE because future has completed
2024-11-10T10:32:07,571 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,44191,1731234711534}]
2024-11-10T10:32:07,729 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:07,729 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 298cceebf7d48f429a5ffead936f01b2, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.', STARTKEY => '', ENDKEY => ''}
2024-11-10T10:32:07,729 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,729 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:32:07,730 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,730 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,731 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,732 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf1
2024-11-10T10:32:07,732 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:07,739 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/ac22617072d04227aa42d29ea43abcc2
2024-11-10T10:32:07,739 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:07,739 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,740 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf2
2024-11-10T10:32:07,740 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:07,740 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:07,741 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,741 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,742 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,743 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,743 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,744 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor; using region.getMemStoreFlushHeapSize/# of families (64.0 M) instead.
2024-11-10T10:32:07,745 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,746 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 298cceebf7d48f429a5ffead936f01b2; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69607505, jitterRate=0.0372326523065567}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864}
2024-11-10T10:32:07,746 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:07,747 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 298cceebf7d48f429a5ffead936f01b2: Running coprocessor pre-open hook at 1731234727730Writing region info on filesystem at 1731234727730Initializing all the Stores at 1731234727731 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234727731Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234727731Cleaning up temporary data from old regions at 1731234727743 (+12 ms)Running coprocessor post-open hooks at 1731234727746 (+3 ms)Region opened successfully at 1731234727747 (+1 ms)
2024-11-10T10:32:07,748 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., pid=9, masterSystemTime=1731234727724
2024-11-10T10:32:07,751 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy task for
testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:07,751 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:07,752 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=OPEN, openSeqNum=9, regionLocation=eccb6f743e07,44191,1731234711534 2024-11-10T10:32:07,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,44191,1731234711534 because future has completed 2024-11-10T10:32:07,759 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-10T10:32:07,759 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,44191,1731234711534 in 185 msec 2024-11-10T10:32:07,762 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE in 572 msec 2024-11-10T10:32:07,794 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-10T10:32:07,796 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53472, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-10T10:32:07,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 139 connection: 172.17.0.2:48856 deadline: 1731234787799, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=eccb6f743e07 port=44191 startCode=1731234711534. As of locationSeqNum=5. 2024-11-10T10:32:07,805 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=2 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=eccb6f743e07 port=44191 startCode=1731234711534. As of locationSeqNum=5. 2024-11-10T10:32:07,806 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=eccb6f743e07 port=44191 startCode=1731234711534. As of locationSeqNum=5. 
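The REOPEN/MOVE procedure that just finished (pid=7, with CloseRegionProcedure pid=8 and OpenRegionProcedure pid=9 as subprocedures) is the server-side trace of a client-requested region move: close on the source region server, update hbase:meta, reopen on the destination, after which stale clients are redirected via RegionMovedException as seen above. A minimal sketch of driving such a move through the public Admin API, assuming a reachable cluster; the destination ServerName is copied from this log and the single-region lookup is illustrative, not the test's actual code:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class MoveRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // The test table has a single region; fetch its RegionInfo.
          List<RegionInfo> regions = admin.getRegions(table);
          RegionInfo region = regions.get(0);
          // Destination copied from the log: host, port, start code.
          ServerName dest = ServerName.valueOf("eccb6f743e07", 44191, 1731234711534L);
          // Kicks off the TransitRegionStateProcedure (REOPEN/MOVE):
          // CloseRegionProcedure on the source, meta update, OpenRegionProcedure
          // on the destination -- the pid=7/8/9 sequence recorded above.
          admin.move(region.getEncodedNameAsBytes(), dest);
        }
      }
    }
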
2024-11-10T10:32:07,806 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=2 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,44191,1731234711534, seqNum=5 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=eccb6f743e07 port=44191 startCode=1731234711534. As of locationSeqNum=5. 2024-11-10T10:32:07,915 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-10T10:32:07,917 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53476, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-10T10:32:07,928 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 298cceebf7d48f429a5ffead936f01b2 2/2 column families, dataSize=50 B heapSize=720 B 2024-11-10T10:32:07,945 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp/cf1/59b7628fb66d4f2cbff0eae4d693424d is 29, key is r1/cf1:/1731234727918/DeleteFamily/seqid=0 2024-11-10T10:32:07,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741888_1066 (size=4906) 2024-11-10T10:32:07,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741888_1066 (size=4906) 2024-11-10T10:32:07,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741888_1066 (size=4906) 2024-11-10T10:32:07,964 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp/cf1/59b7628fb66d4f2cbff0eae4d693424d 2024-11-10T10:32:07,971 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 59b7628fb66d4f2cbff0eae4d693424d 2024-11-10T10:32:07,991 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp/cf2/7e399e5a69484931a270f0845505bff0 is 29, key is r1/cf2:/1731234727918/DeleteFamily/seqid=0 2024-11-10T10:32:08,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741889_1067 (size=4906) 2024-11-10T10:32:08,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741889_1067 (size=4906) 2024-11-10T10:32:08,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741889_1067 (size=4906) 
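The flush that starts here ("Flushing 298cceebf7d48f429a5ffead936f01b2 2/2 column families, dataSize=50 B") follows DeleteFamily markers written for row r1 in cf1 and cf2, visible in the HFileWriterImpl key dumps ("key is r1/cf1:/1731234727918/DeleteFamily"). A minimal sketch of producing the same write-then-flush sequence from a client, assuming the table and family names from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteAndFlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(name)) {
          // One DeleteFamily marker per column family for row r1, matching the
          // "r1/cf1:/.../DeleteFamily" keys in the flush output above.
          Delete delete = new Delete(Bytes.toBytes("r1"));
          delete.addFamily(Bytes.toBytes("cf1"));
          delete.addFamily(Bytes.toBytes("cf2"));
          table.delete(delete);
          // Synchronous flush: each memstore is written to a .tmp HFile and then
          // committed into its cf directory, as the log records next.
          admin.flush(name);
        }
      }
    }
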
2024-11-10T10:32:08,004 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp/cf2/7e399e5a69484931a270f0845505bff0 2024-11-10T10:32:08,011 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7e399e5a69484931a270f0845505bff0 2024-11-10T10:32:08,012 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp/cf1/59b7628fb66d4f2cbff0eae4d693424d as hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/59b7628fb66d4f2cbff0eae4d693424d 2024-11-10T10:32:08,020 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 59b7628fb66d4f2cbff0eae4d693424d 2024-11-10T10:32:08,021 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/59b7628fb66d4f2cbff0eae4d693424d, entries=1, sequenceid=12, filesize=4.8 K 2024-11-10T10:32:08,022 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp/cf2/7e399e5a69484931a270f0845505bff0 as hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf2/7e399e5a69484931a270f0845505bff0 2024-11-10T10:32:08,029 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7e399e5a69484931a270f0845505bff0 2024-11-10T10:32:08,029 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf2/7e399e5a69484931a270f0845505bff0, entries=1, sequenceid=12, filesize=4.8 K 2024-11-10T10:32:08,031 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~50 B/50, heapSize ~688 B/688, currentSize=0 B/0 for 298cceebf7d48f429a5ffead936f01b2 in 103ms, sequenceid=12, compaction requested=false 2024-11-10T10:32:08,031 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 298cceebf7d48f429a5ffead936f01b2: 2024-11-10T10:32:08,034 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-10T10:32:08,036 DEBUG [Time-limited test {}] regionserver.HStore(1541): 298cceebf7d48f429a5ffead936f01b2/cf1 is initiating major compaction (all files) 2024-11-10T10:32:08,036 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, 
higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T10:32:08,036 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T10:32:08,037 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 298cceebf7d48f429a5ffead936f01b2/cf1 in testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:08,037 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/ac22617072d04227aa42d29ea43abcc2, hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/59b7628fb66d4f2cbff0eae4d693424d] into tmpdir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp, totalSize=9.5 K 2024-11-10T10:32:08,038 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting ac22617072d04227aa42d29ea43abcc2, keycount=1, bloomtype=NONE, size=4.7 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731234727173 2024-11-10T10:32:08,039 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 59b7628fb66d4f2cbff0eae4d693424d, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-11-10T10:32:08,051 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 298cceebf7d48f429a5ffead936f01b2#cf1#compaction#16 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T10:32:08,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741890_1068 (size=4626) 2024-11-10T10:32:08,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741890_1068 (size=4626) 2024-11-10T10:32:08,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741890_1068 (size=4626) 2024-11-10T10:32:08,067 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp/cf1/2cab767fb9d441f8a3a74d2fa53c3db1 as hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/2cab767fb9d441f8a3a74d2fa53c3db1 2024-11-10T10:32:08,083 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 2 (all) file(s) in 298cceebf7d48f429a5ffead936f01b2/cf1 of 298cceebf7d48f429a5ffead936f01b2 into 2cab767fb9d441f8a3a74d2fa53c3db1(size=4.5 K), total size for store is 4.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
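The cf1 major compaction above rewrites ac22617072d04227aa42d29ea43abcc2 and 59b7628fb66d4f2cbff0eae4d693424d into the single store file 2cab767fb9d441f8a3a74d2fa53c3db1. The test drives HRegion/HStore directly; from a client the equivalent request would go through Admin, roughly as in this sketch (the polling loop is an assumption for illustration, not part of the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MajorCompactSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a major compaction of cf1 only; the call is asynchronous.
          admin.majorCompact(name, Bytes.toBytes("cf1"));
          // Assumed completion check: poll until no compaction is reported.
          while (admin.getCompactionState(name) != CompactionState.NONE) {
            Thread.sleep(100);
          }
        }
      }
    }
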
2024-11-10T10:32:08,083 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 298cceebf7d48f429a5ffead936f01b2: 2024-11-10T10:32:08,083 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-10T10:32:08,083 DEBUG [Time-limited test {}] regionserver.HStore(1541): 298cceebf7d48f429a5ffead936f01b2/cf2 is initiating major compaction (all files) 2024-11-10T10:32:08,083 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-10T10:32:08,083 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-10T10:32:08,083 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 298cceebf7d48f429a5ffead936f01b2/cf2 in testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:08,084 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf2/7e399e5a69484931a270f0845505bff0] into tmpdir=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp, totalSize=4.8 K 2024-11-10T10:32:08,084 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 7e399e5a69484931a270f0845505bff0, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-11-10T10:32:08,091 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 298cceebf7d48f429a5ffead936f01b2#cf2#compaction#17 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-10T10:32:08,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741891_1069 (size=4592) 2024-11-10T10:32:08,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741891_1069 (size=4592) 2024-11-10T10:32:08,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741891_1069 (size=4592) 2024-11-10T10:32:08,112 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/.tmp/cf2/4ff27b3fb6aa4b449446fe9cca522f8a as hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf2/4ff27b3fb6aa4b449446fe9cca522f8a 2024-11-10T10:32:08,120 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 1 (all) file(s) in 298cceebf7d48f429a5ffead936f01b2/cf2 of 298cceebf7d48f429a5ffead936f01b2 into 4ff27b3fb6aa4b449446fe9cca522f8a(size=4.5 K), total size for store is 4.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-10T10:32:08,120 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 298cceebf7d48f429a5ffead936f01b2: 2024-11-10T10:32:08,124 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=298cceebf7d48f429a5ffead936f01b2, source=eccb6f743e07,44191,1731234711534, destination=eccb6f743e07,39345,1731234711428, warming up region on eccb6f743e07,39345,1731234711428 2024-11-10T10:32:08,125 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=298cceebf7d48f429a5ffead936f01b2, source=eccb6f743e07,44191,1731234711534, destination=eccb6f743e07,39345,1731234711428, running balancer 2024-11-10T10:32:08,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE 2024-11-10T10:32:08,126 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE 2024-11-10T10:32:08,128 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=CLOSING, regionLocation=eccb6f743e07,44191,1731234711534 2024-11-10T10:32:08,129 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 
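Before closing the region on eccb6f743e07,44191, the master warms it up on the destination server (the RSRpcServices Warmup line above), so the new region server already has store readers open when the real open arrives. After a move like this, a client can re-resolve the region's location explicitly; a minimal sketch, assuming the row key r1 from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName name = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(name)) {
          // reload=true bypasses the client-side location cache and re-reads
          // hbase:meta, which is what the AsyncRegionLocatorHelper retry path
          // seen earlier in this log ends up doing after RegionMovedException.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("r1"), true);
          System.out.println("now on " + loc.getServerName()
              + ", openSeqNum=" + loc.getSeqNum());
        }
      }
    }
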
2024-11-10T10:32:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] regionserver.HRegion(7855): Warmup {ENCODED => 298cceebf7d48f429a5ffead936f01b2, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:08,130 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,133 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf1 2024-11-10T10:32:08,133 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:08,134 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE because future has completed 2024-11-10T10:32:08,145 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-10T10:32:08,145 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,44191,1731234711534}] 2024-11-10T10:32:08,152 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/2cab767fb9d441f8a3a74d2fa53c3db1 2024-11-10T10:32:08,159 INFO [StoreFileOpener-298cceebf7d48f429a5ffead936f01b2-cf1-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 59b7628fb66d4f2cbff0eae4d693424d 2024-11-10T10:32:08,159 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/59b7628fb66d4f2cbff0eae4d693424d 2024-11-10T10:32:08,165 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/ac22617072d04227aa42d29ea43abcc2 2024-11-10T10:32:08,166 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:08,166 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,167 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf2 2024-11-10T10:32:08,167 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:08,174 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf2/4ff27b3fb6aa4b449446fe9cca522f8a 2024-11-10T10:32:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] procedure.ProcedureSyncWait(219): waitFor pid=10 2024-11-10T10:32:08,179 INFO [StoreFileOpener-298cceebf7d48f429a5ffead936f01b2-cf2-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7e399e5a69484931a270f0845505bff0 2024-11-10T10:32:08,179 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf2/7e399e5a69484931a270f0845505bff0 2024-11-10T10:32:08,179 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
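The warmup open loads the region's current store files: 2cab767fb9d441f8a3a74d2fa53c3db1 plus the not-yet-archived ac22617072d04227aa42d29ea43abcc2 and 59b7628fb66d4f2cbff0eae4d693424d in cf1, and 4ff27b3fb6aa4b449446fe9cca522f8a plus 7e399e5a69484931a270f0845505bff0 in cf2. These are ordinary HFiles under the region's column-family directory and can be listed with the Hadoop FileSystem API; a minimal sketch with the path copied from this log (a real tool would derive it from hbase.rootdir, the namespace, table, encoded region name, and family):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFilesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Path copied verbatim from the log above; illustrative only.
        Path cf1 = new Path("hdfs://localhost:39939/user/jenkins/test-data/"
            + "12c52112-6087-7869-515a-a4f0103ccacf/data/default/"
            + "testReplayEditsAfterRegionMovedWithMultiCF/"
            + "298cceebf7d48f429a5ffead936f01b2/cf1");
        FileSystem fs = FileSystem.get(cf1.toUri(), conf);
        for (FileStatus f : fs.listStatus(cf1)) {
          System.out.println(f.getPath().getName() + " (" + f.getLen() + " bytes)");
        }
      }
    }
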
2024-11-10T10:32:08,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] regionserver.HRegion(1722): Closing 298cceebf7d48f429a5ffead936f01b2, disabling compactions & flushes 2024-11-10T10:32:08,179 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:08,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:08,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. after waiting 0 ms 2024-11-10T10:32:08,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:08,181 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:08,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39345 {}] regionserver.HRegion(1676): Region close journal for 298cceebf7d48f429a5ffead936f01b2: Waiting for close lock at 1731234728179Disabling compacts and flushes for region at 1731234728179Disabling writes for close at 1731234728179Writing region close event to WAL at 1731234728181 (+2 ms)Closed at 1731234728181 2024-11-10T10:32:08,299 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(122): Close 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,299 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-10T10:32:08,299 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1722): Closing 298cceebf7d48f429a5ffead936f01b2, disabling compactions & flushes 2024-11-10T10:32:08,299 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:08,299 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:08,299 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. after waiting 0 ms 2024-11-10T10:32:08,299 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 
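The close journals in this log ("Waiting for close lock ... Running coprocessor pre-close hooks ... Closed at ...") mark the points where RegionObserver close hooks run; the warmup close immediately above skips them, while the real closes (pid=8 and pid=11) run them. A minimal sketch of an observer invoked at those points; the class name and output are illustrative, and it would be registered via hbase.coprocessor.region.classes or on the table descriptor:

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;

    public class CloseLoggingObserver implements RegionCoprocessor, RegionObserver {
      @Override
      public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);
      }

      // Runs at the "Running coprocessor pre-close hooks" step of the journal.
      @Override
      public void preClose(ObserverContext<RegionCoprocessorEnvironment> c,
          boolean abortRequested) throws IOException {
        System.out.println("pre-close "
            + c.getEnvironment().getRegionInfo().getEncodedName()
            + ", abort=" + abortRequested);
      }

      // Runs at the "Running coprocessor post-close hooks" step.
      @Override
      public void postClose(ObserverContext<RegionCoprocessorEnvironment> c,
          boolean abortRequested) {
        System.out.println("post-close, abort=" + abortRequested);
      }
    }
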
2024-11-10T10:32:08,300 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/ac22617072d04227aa42d29ea43abcc2, hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/59b7628fb66d4f2cbff0eae4d693424d] to archive 2024-11-10T10:32:08,303 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-10T10:32:08,306 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/ac22617072d04227aa42d29ea43abcc2 to hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/ac22617072d04227aa42d29ea43abcc2 2024-11-10T10:32:08,308 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/59b7628fb66d4f2cbff0eae4d693424d to hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/59b7628fb66d4f2cbff0eae4d693424d 2024-11-10T10:32:08,322 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf2/7e399e5a69484931a270f0845505bff0] to archive 2024-11-10T10:32:08,322 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
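The HFileArchiver lines here show the archive layout: when a region closes, store files already rewritten by compaction are not deleted but moved, keeping their region-relative path, from the data tree into the cluster-wide archive tree. A trivial sketch of that path mapping, using values copied from this log:

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      public static void main(String[] args) {
        // Root and relative path copied from the log above.
        String root = "hdfs://localhost:39939/user/jenkins/test-data/"
            + "12c52112-6087-7869-515a-a4f0103ccacf";
        String relative = "data/default/testReplayEditsAfterRegionMovedWithMultiCF/"
            + "298cceebf7d48f429a5ffead936f01b2/cf1/ac22617072d04227aa42d29ea43abcc2";
        // A compacted-away HFile keeps its region-relative path but is moved
        // under the "archive" directory rather than deleted outright.
        Path live = new Path(root, relative);
        Path archived = new Path(root, "archive/" + relative);
        System.out.println(live + " -> " + archived);
      }
    }
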
2024-11-10T10:32:08,324 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf2/7e399e5a69484931a270f0845505bff0 to hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf2/7e399e5a69484931a270f0845505bff0 2024-11-10T10:32:08,330 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=8 2024-11-10T10:32:08,331 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:08,331 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1676): Region close journal for 298cceebf7d48f429a5ffead936f01b2: Waiting for close lock at 1731234728299Running coprocessor pre-close hooks at 1731234728299Disabling compacts and flushes for region at 1731234728299Disabling writes for close at 1731234728299Writing region close event to WAL at 1731234728326 (+27 ms)Running coprocessor post-close hooks at 1731234728331 (+5 ms)Closed at 1731234728331 2024-11-10T10:32:08,331 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegionServer(3302): Adding 298cceebf7d48f429a5ffead936f01b2 move to eccb6f743e07,39345,1731234711428 record at close sequenceid=12 2024-11-10T10:32:08,334 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(157): Closed 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,336 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=CLOSED 2024-11-10T10:32:08,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,44191,1731234711534 because future has completed 2024-11-10T10:32:08,343 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-11-10T10:32:08,343 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; CloseRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,44191,1731234711534 in 195 msec 2024-11-10T10:32:08,344 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE; state=CLOSED, location=eccb6f743e07,39345,1731234711428; forceNewPlan=false, retain=false 2024-11-10T10:32:08,495 INFO 
[eccb6f743e07:38215 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-10T10:32:08,495 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=OPENING, regionLocation=eccb6f743e07,39345,1731234711428 2024-11-10T10:32:08,504 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE because future has completed 2024-11-10T10:32:08,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,39345,1731234711428}] 2024-11-10T10:32:08,662 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:08,662 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 298cceebf7d48f429a5ffead936f01b2, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:08,663 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,663 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:08,663 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,663 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,665 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,666 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, 
single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf1 2024-11-10T10:32:08,666 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:08,672 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/2cab767fb9d441f8a3a74d2fa53c3db1 2024-11-10T10:32:08,672 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:08,673 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,674 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf2 2024-11-10T10:32:08,674 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:08,680 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf2/4ff27b3fb6aa4b449446fe9cca522f8a 2024-11-10T10:32:08,680 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:08,681 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,681 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,683 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,684 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,684 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,685 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-10T10:32:08,686 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,687 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 298cceebf7d48f429a5ffead936f01b2; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69398301, jitterRate=0.034115269780159}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-10T10:32:08,687 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:08,688 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 298cceebf7d48f429a5ffead936f01b2: Running coprocessor pre-open hook at 1731234728663Writing region info on filesystem at 1731234728663Initializing all the Stores at 1731234728664 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234728664Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234728665 (+1 ms)Cleaning up temporary data from old regions at 1731234728684 (+19 ms)Running coprocessor post-open hooks at 1731234728687 (+3 ms)Region opened successfully at 1731234728688 (+1 ms) 2024-11-10T10:32:08,689 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): 
Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., pid=12, masterSystemTime=1731234728657
2024-11-10T10:32:08,692 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:08,692 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:08,693 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=OPEN, openSeqNum=18, regionLocation=eccb6f743e07,39345,1731234711428
2024-11-10T10:32:08,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,39345,1731234711428 because future has completed
2024-11-10T10:32:08,699 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10
2024-11-10T10:32:08,699 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,39345,1731234711428 in 192 msec
2024-11-10T10:32:08,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, REOPEN/MOVE in 574 msec
2024-11-10T10:32:08,728 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-10T10:32:08,731 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48864, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-10T10:32:08,733 ERROR [Time-limited test {}] regionserver.HRegionServer(2442): ***** ABORTING region server eccb6f743e07,39345,1731234711428: testing *****
2024-11-10T10:32:08,733 ERROR [Time-limited test {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint]
2024-11-10T10:32:08,736 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for java.lang:type=Memory
2024-11-10T10:32:08,738 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC
2024-11-10T10:32:08,741 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication
2024-11-10T10:32:08,742 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server
2024-11-10T10:32:08,749 INFO [Time-limited test {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: {
"beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 322152000 }, "NonHeapMemoryUsage": { "committed": 172032000, "init": 7667712, "max": -1, "used": 169360576 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ],
"beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=IPC", "modelerType": "RegionServer,sub=IPC", "tag.Context": "regionserver", "tag.Hostname": "eccb6f743e07", "queueSize": 0, "numCallsInGeneralQueue": 0, "numCallsInReplicationQueue": 0, "numCallsInBulkLoadQueue": 0, "numCallsInPriorityQueue": 0, "numCallsInMetaPriorityQueue": 0, "numOpenConnections": 0, "numActiveHandler": 0, "numActiveGeneralHandler": 0, "numActivePriorityHandler": 0, "numActiveReplicationHandler": 0, "numGeneralCallsDropped": 0, "numLifoModeSwitches": 0, "numCallsInWriteQueue": 0, "numActiveBulkLoadHandler": 0, "numCallsInReadQueue": 0, "numCallsInScanQueue": 0, "numActiveWriteHandler": 0, "numActiveReadHandler": 0, "numActiveScanHandler": 0, "nettyDirectMemoryUsage": 67108864, "nettyTotalPendingOutboundBytes": 0, "nettyMaxPendingOutboundBytes": 0, "receivedBytes": 2071, "exceptions.RegionMovedException": 0, "authenticationSuccesses": 0, "authorizationFailures": 0, "exceptions.requestTooBig": 0, "UnwritableTime_num_ops": 0, "UnwritableTime_min": 0, "UnwritableTime_max": 0, "UnwritableTime_mean": 0, "UnwritableTime_25th_percentile": 0, "UnwritableTime_median": 0, "UnwritableTime_75th_percentile": 0, "UnwritableTime_90th_percentile": 0, "UnwritableTime_95th_percentile": 0, "UnwritableTime_98th_percentile": 0, "UnwritableTime_99th_percentile": 0, "UnwritableTime_99.9th_percentile": 0, "exceptions.OutOfOrderScannerNextException": 0, "exceptions.rpcThrottling": 0, "exceptions.otherExceptions": 0, "ProcessCallTime_num_ops": 8, "ProcessCallTime_min": 0, "ProcessCallTime_max": 11, "ProcessCallTime_mean": 4, "ProcessCallTime_25th_percentile": 2, "ProcessCallTime_median": 5, "ProcessCallTime_75th_percentile": 8, "ProcessCallTime_90th_percentile": 9, "ProcessCallTime_95th_percentile": 10, "ProcessCallTime_98th_percentile": 10, "ProcessCallTime_99th_percentile": 10, "ProcessCallTime_99.9th_percentile": 10, "ProcessCallTime_TimeRangeCount_0-1": 8, "exceptions.callQueueTooBig": 0, "QueueCallTime_num_ops": 8, "QueueCallTime_min": 0, "QueueCallTime_max": 1, "QueueCallTime_mean": 0, "QueueCallTime_25th_percentile": 0, "QueueCallTime_median": 0, "QueueCallTime_75th_percentile": 0, "QueueCallTime_90th_percentile": 0, "QueueCallTime_95th_percentile": 0, "QueueCallTime_98th_percentile": 0, "QueueCallTime_99th_percentile": 0, "QueueCallTime_99.9th_percentile": 0, "QueueCallTime_TimeRangeCount_0-1": 8, "authenticationFailures": 0, "exceptions.multiResponseTooLarge": 0, "exceptions.callDropped": 0, "TotalCallTime_num_ops": 8, "TotalCallTime_min": 1, "TotalCallTime_max": 11, "TotalCallTime_mean": 4, "TotalCallTime_25th_percentile": 3, "TotalCallTime_median": 6, "TotalCallTime_75th_percentile": 8, "TotalCallTime_90th_percentile": 10, "TotalCallTime_95th_percentile": 10, "TotalCallTime_98th_percentile": 10, "TotalCallTime_99th_percentile": 10, "TotalCallTime_99.9th_percentile": 10, "TotalCallTime_TimeRangeCount_0-1": 8, "exceptions.RegionTooBusyException": 0, "exceptions.FailedSanityCheckException": 0, "ResponseSize_num_ops": 8, "ResponseSize_min": 0, "ResponseSize_max": 174, "ResponseSize_mean": 50, "ResponseSize_25th_percentile": 43, "ResponseSize_median": 87, "ResponseSize_75th_percentile": 130, "ResponseSize_90th_percentile": 156, "ResponseSize_95th_percentile": 165, "ResponseSize_98th_percentile": 170, "ResponseSize_99th_percentile": 172, "ResponseSize_99.9th_percentile": 173, "ResponseSize_SizeRangeCount_0-10": 8, "exceptions.UnknownScannerException": 0, "exceptions": 0, "maxOutboundBytesExceeded": 0, "authenticationFallbacks": 0, "exceptions.quotaExceeded": 0, "exceptions.callTimedOut": 0, "exceptions.NotServingRegionException": 0, "authorizationSuccesses": 0, "exceptions.ScannerResetException": 0, "RequestSize_num_ops": 8, "RequestSize_min": 89, "RequestSize_max": 121, "RequestSize_mean": 103, "RequestSize_25th_percentile": 97, "RequestSize_median": 105, "RequestSize_75th_percentile": 113, "RequestSize_90th_percentile": 117, "RequestSize_95th_percentile": 119, "RequestSize_98th_percentile": 120, "RequestSize_99th_percentile": 120, "RequestSize_99.9th_percentile": 120, "RequestSize_SizeRangeCount_0-10": 8, "sentBytes": 348 } ],
"beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Replication", "modelerType": "RegionServer,sub=Replication", "tag.Context": "regionserver", "tag.Hostname": "eccb6f743e07", "source.shippedHFiles": 0, "Source.ageOfLastShippedOp_num_ops": 0, "Source.ageOfLastShippedOp_min": 0, "Source.ageOfLastShippedOp_max": 0, "Source.ageOfLastShippedOp_mean": 0, "Source.ageOfLastShippedOp_25th_percentile": 0, "Source.ageOfLastShippedOp_median": 0, "Source.ageOfLastShippedOp_75th_percentile": 0, "Source.ageOfLastShippedOp_90th_percentile": 0, "Source.ageOfLastShippedOp_95th_percentile": 0, "Source.ageOfLastShippedOp_98th_percentile": 0, "Source.ageOfLastShippedOp_99th_percentile": 0, "Source.ageOfLastShippedOp_99.9th_percentile": 0, "source.uncleanlyClosedLogs": 0, "source.closedLogsWithUnknownFileLength": 0, "source.walReaderEditsBufferUsage": 0, "source.repeatedLogFileBytes": 0, "source.sizeOfHFileRefsQueue": 0, "source.logReadInBytes": 0, "source.completedRecoverQueues": 0, "source.sizeOfLogQueue": 0, "source.restartedLogReading": 0, "source.failedRecoverQueues": 0, "source.ignoredUncleanlyClosedLogContentsInBytes": 0, "Sink.ageOfLastAppliedOp_num_ops": 0, "Sink.ageOfLastAppliedOp_min": 0, "Sink.ageOfLastAppliedOp_max": 0, "Sink.ageOfLastAppliedOp_mean": 0, "Sink.ageOfLastAppliedOp_25th_percentile": 0, "Sink.ageOfLastAppliedOp_median": 0, "Sink.ageOfLastAppliedOp_75th_percentile": 0, "Sink.ageOfLastAppliedOp_90th_percentile": 0, "Sink.ageOfLastAppliedOp_95th_percentile": 0, "Sink.ageOfLastAppliedOp_98th_percentile": 0, "Sink.ageOfLastAppliedOp_99th_percentile": 0, "Sink.ageOfLastAppliedOp_99.9th_percentile": 0, "source.logEditsRead": 0, "source.numInitializing": 0, "source.shippedOps": 0, "sink.appliedHFiles": 0, "source.logEditsFiltered": 0, "source.shippedBytes": 0, "sink.appliedOps": 0, "source.completedLogs": 0, "source.failedBatches": 0, "sink.failedBatches": 0, "source.shippedBatches": 0, "sink.appliedBatches": 0 } ],
"beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Server", "modelerType": "RegionServer,sub=Server", "tag.zookeeperQuorum": "127.0.0.1:57026", "tag.serverName": "eccb6f743e07,39345,1731234711428", "tag.clusterId": "4ca9019e-9f51-4a88-9744-47822550e251", "tag.Context": "regionserver", "tag.Hostname": "eccb6f743e07", "regionCount": 0, "storeCount": 0, "hlogFileCount": 1, "hlogFileSize": 0, "storeFileCount": 0, "maxStoreFileCount": 0, "memStoreSize": 0, "memStoreHeapSize": 0, "memStoreOffHeapSize": 0, "storeFileSize": 0, "storeFileSizeGrowthRate": 0.0, "maxStoreFileAge": 0, "minStoreFileAge": 0, "avgStoreFileAge": 0, "numReferenceFiles": 0, "regionServerStartTime": 1731234711428, "averageRegionSize": 0, "storeFileIndexSize": 0, "staticIndexSize": 0, "staticBloomSize": 0, "bloomFilterRequestsCount": 0, "bloomFilterNegativeResultsCount": 0,
"bloomFilterEligibleRequestsCount": 0, "mutationsWithoutWALCount": 0, "mutationsWithoutWALSize": 0, "percentFilesLocal": 0.0, "percentFilesLocalSecondaryRegions": 0.0, "totalBytesRead": 10034, "localBytesRead": 10034, "shortCircuitBytesRead": 0, "zeroCopyBytesRead": 0, "splitQueueLength": 0, "compactionQueueLength": 0, "smallCompactionQueueLength": 0, "largeCompactionQueueLength": 0, "flushQueueLength": 0, "blockCacheFreeSize": 922070024, "blockCacheCount": 0, "blockCacheDataBlockCount": 0, "blockCacheSize": 676856, "blockCacheCountHitPercent": 0.0, "blockCacheExpressHitPercent": 0.0, "l1CacheSize": 676856, "l1CacheFreeSize": 922070024, "l1CacheCount": 0, "l1CacheEvictionCount": 0, "l1CacheHitCount": 0, "l1CacheMissCount": 0, "l1CacheHitRatio": 0.0, "l1CacheMissRatio": 0.0, "l2CacheSize": 0, "l2CacheFreeSize": 0, "l2CacheCount": 0, "l2CacheEvictionCount": 0, "l2CacheHitCount": 0, "l2CacheMissCount": 0, "l2CacheHitRatio": 0.0, "l2CacheMissRatio": 0.0, "mobFileCacheCount": 0, "mobFileCacheHitPercent": 0.0, "readRequestRatePerSecond": 0.0, "writeRequestRatePerSecond": 0.0, "ByteBuffAllocatorHeapAllocationBytes": 4782, "ByteBuffAllocatorPoolAllocationBytes": 0, "ByteBuffAllocatorHeapAllocationRatio": 0.0, "ByteBuffAllocatorTotalBufferCount": 186, "ByteBuffAllocatorUsedBufferCount": 0, "activeScanners": 0, "totalRequestCount": 0, "totalRowActionRequestCount": 0, "readRequestCount": 0, "cpRequestCount": 0, "filteredReadRequestCount": 0, "writeRequestCount": 0, "rpcGetRequestCount": 0, "rpcFullScanRequestCount": 0, "rpcScanRequestCount": 0, "rpcMultiRequestCount": 0, "rpcMutateRequestCount": 0, "checkMutateFailedCount": 0, "checkMutatePassedCount": 0, "blockCacheHitCount": 0, "blockCacheHitCountPrimary": 0, "blockCacheHitCachingCount": 0, "blockCacheMissCount": 0, "blockCacheMissCountPrimary": 0, "blockCacheMissCachingCount": 0, "blockCacheEvictionCount": 0, "blockCacheEvictionCountPrimary": 0, "blockCacheFailedInsertionCount": 0, "blockCacheDataMissCount": 0, "blockCacheLeafIndexMissCount": 0, "blockCacheBloomChunkMissCount": 0, "blockCacheMetaMissCount": 0, "blockCacheRootIndexMissCount": 0, "blockCacheIntermediateIndexMissCount": 0, "blockCacheFileInfoMissCount": 0, "blockCacheGeneralBloomMetaMissCount": 0, "blockCacheDeleteFamilyBloomMissCount": 0, "blockCacheTrailerMissCount": 0, "blockCacheDataHitCount": 0, "blockCacheLeafIndexHitCount": 0, "blockCacheBloomChunkHitCount": 0, "blockCacheMetaHitCount": 0, "blockCacheRootIndexHitCount": 0, "blockCacheIntermediateIndexHitCount": 0, "blockCacheFileInfoHitCount": 0, "blockCacheGeneralBloomMetaHitCount": 0, "blockCacheDeleteFamilyBloomHitCount": 0, "blockCacheTrailerHitCount": 0, "updatesBlockedTime": 0, "flushedCellsCount": 0, "compactedCellsCount": 0, "majorCompactedCellsCount": 0, "flushedCellsSize": 0, "compactedCellsSize": 0, "majorCompactedCellsSize": 0, "cellsCountCompactedFromMob": 0, "cellsCountCompactedToMob": 0, "cellsSizeCompactedFromMob": 0, "cellsSizeCompactedToMob": 0, "mobFlushCount": 0, "mobFlushedCellsCount": 0, "mobFlushedCellsSize": 0, "mobScanCellsCount": 0, "mobScanCellsSize": 0, "mobFileCacheAccessCount": 0, "mobFileCacheMissCount": 0, "mobFileCacheEvictedCount": 0, "hedgedReads": 0, "hedgedReadWins": 0, "hedgedReadOpsInCurThread": 0, "blockedRequestCount": 0, "CheckAndMutate_num_ops": 0, "CheckAndMutate_min": 0, "CheckAndMutate_max": 0, "CheckAndMutate_mean": 0, "CheckAndMutate_25th_percentile": 0, "CheckAndMutate_median": 0, "CheckAndMutate_75th_percentile": 0, "CheckAndMutate_90th_percentile": 0,
"CheckAndMutate_95th_percentile": 0, "CheckAndMutate_98th_percentile": 0, "CheckAndMutate_99th_percentile": 0, "CheckAndMutate_99.9th_percentile": 0, "MajorCompactionTime_num_ops": 0, "MajorCompactionTime_min": 0, "MajorCompactionTime_max": 0, "MajorCompactionTime_mean": 0, "MajorCompactionTime_25th_percentile": 0, "MajorCompactionTime_median": 0, "MajorCompactionTime_75th_percentile": 0, "MajorCompactionTime_90th_percentile": 0, "MajorCompactionTime_95th_percentile": 0, "MajorCompactionTime_98th_percentile": 0, "MajorCompactionTime_99th_percentile": 0, "MajorCompactionTime_99.9th_percentile": 0, "ScanTime_num_ops": 0, "ScanTime_min": 0, "ScanTime_max": 0, "ScanTime_mean": 0, "ScanTime_25th_percentile": 0, "ScanTime_median": 0, "ScanTime_75th_percentile": 0, "ScanTime_90th_percentile": 0, "ScanTime_95th_percentile": 0, "ScanTime_98th_percentile": 0, "ScanTime_99th_percentile": 0, "ScanTime_99.9th_percentile": 0, "CheckAndMutateBlockBytesScanned_num_ops": 0, "CheckAndMutateBlockBytesScanned_min": 0, "CheckAndMutateBlockBytesScanned_max": 0, "CheckAndMutateBlockBytesScanned_mean": 0, "CheckAndMutateBlockBytesScanned_25th_percentile": 0, "CheckAndMutateBlockBytesScanned_median": 0, "CheckAndMutateBlockBytesScanned_75th_percentile": 0, "CheckAndMutateBlockBytesScanned_90th_percentile": 0, "CheckAndMutateBlockBytesScanned_95th_percentile": 0, "CheckAndMutateBlockBytesScanned_98th_percentile": 0, "CheckAndMutateBlockBytesScanned_99th_percentile": 0, "CheckAndMutateBlockBytesScanned_99.9th_percentile": 0, "Put_num_ops": 0, "Put_min": 0, "Put_max": 0, "Put_mean": 0, "Put_25th_percentile": 0, "Put_median": 0, "Put_75th_percentile": 0, "Put_90th_percentile": 0, "Put_95th_percentile": 0, "Put_98th_percentile": 0, "Put_99th_percentile": 0, "Put_99.9th_percentile": 0, "splitRequestCount": 0, "AppendBlockBytesScanned_num_ops": 0, "AppendBlockBytesScanned_min": 0, "AppendBlockBytesScanned_max": 0, "AppendBlockBytesScanned_mean": 0, "AppendBlockBytesScanned_25th_percentile": 0, "AppendBlockBytesScanned_median": 0, "AppendBlockBytesScanned_75th_percentile": 0, "AppendBlockBytesScanned_90th_percentile": 0, "AppendBlockBytesScanned_95th_percentile": 0, "AppendBlockBytesScanned_98th_percentile": 0, "AppendBlockBytesScanned_99th_percentile": 0, "AppendBlockBytesScanned_99.9th_percentile": 0, "PutBatch_num_ops": 0, "PutBatch_min": 0, "PutBatch_max": 0, "PutBatch_mean": 0, "PutBatch_25th_percentile": 0, "PutBatch_median": 0, "PutBatch_75th_percentile": 0, "PutBatch_90th_percentile": 0, "PutBatch_95th_percentile": 0, "PutBatch_98th_percentile": 0, "PutBatch_99th_percentile": 0, "PutBatch_99.9th_percentile": 0, "IncrementBlockBytesScanned_num_ops": 0, "IncrementBlockBytesScanned_min": 0, "IncrementBlockBytesScanned_max": 0, "IncrementBlockBytesScanned_mean": 0, "IncrementBlockBytesScanned_25th_percentile": 0, "IncrementBlockBytesScanned_median": 0, "IncrementBlockBytesScanned_75th_percentile": 0, "IncrementBlockBytesScanned_90th_percentile": 0, "IncrementBlockBytesScanned_95th_percentile": 0, "IncrementBlockBytesScanned_98th_percentile": 0, "IncrementBlockBytesScanned_99th_percentile": 0, "IncrementBlockBytesScanned_99.9th_percentile": 0, "SplitTime_num_ops": 0, "SplitTime_min": 0, "SplitTime_max": 0, "SplitTime_mean": 0, "SplitTime_25th_percentile": 0, "SplitTime_median": 0, "SplitTime_75th_percentile": 0, "SplitTime_90th_percentile": 0, "SplitTime_95th_percentile": 0, "SplitTime_98th_percentile": 0, "SplitTime_99th_percentile": 0, "SplitTime_99.9th_percentile": 0, "GetBlockBytesScanned_num_ops": 0,
"GetBlockBytesScanned_min": 0, "GetBlockBytesScanned_max": 0, "GetBlockBytesScanned_mean": 0, "GetBlockBytesScanned_25th_percentile": 0, "GetBlockBytesScanned_median": 0, "GetBlockBytesScanned_75th_percentile": 0, "GetBlockBytesScanned_90th_percentile": 0, "GetBlockBytesScanned_95th_percentile": 0, "GetBlockBytesScanned_98th_percentile": 0, "GetBlockBytesScanned_99th_percentile": 0, "GetBlockBytesScanned_99.9th_percentile": 0, "majorCompactedInputBytes": 0, "slowAppendCount": 0, "flushedOutputBytes": 0, "Replay_num_ops": 0, "Replay_min": 0, "Replay_max": 0, "Replay_mean": 0, "Replay_25th_percentile": 0, "Replay_median": 0, "Replay_75th_percentile": 0, "Replay_90th_percentile": 0, "Replay_95th_percentile": 0, "Replay_98th_percentile": 0, "Replay_99th_percentile": 0, "Replay_99.9th_percentile": 0, "MajorCompactionInputSize_num_ops": 0, "MajorCompactionInputSize_min": 0, "MajorCompactionInputSize_max": 0, "MajorCompactionInputSize_mean": 0, "MajorCompactionInputSize_25th_percentile": 0, "MajorCompactionInputSize_median": 0, "MajorCompactionInputSize_75th_percentile": 0, "MajorCompactionInputSize_90th_percentile": 0, "MajorCompactionInputSize_95th_percentile": 0, "MajorCompactionInputSize_98th_percentile": 0, "MajorCompactionInputSize_99th_percentile": 0, "MajorCompactionInputSize_99.9th_percentile": 0, "pauseInfoThresholdExceeded": 0, "CheckAndDelete_num_ops": 0, "CheckAndDelete_min": 0, "CheckAndDelete_max": 0, "CheckAndDelete_mean": 0, "CheckAndDelete_25th_percentile": 0, "CheckAndDelete_median": 0, "CheckAndDelete_75th_percentile": 0, "CheckAndDelete_90th_percentile": 0, "CheckAndDelete_95th_percentile": 0, "CheckAndDelete_98th_percentile": 0, "CheckAndDelete_99th_percentile": 0, "CheckAndDelete_99.9th_percentile": 0, "CompactionInputSize_num_ops": 0, "CompactionInputSize_min": 0, "CompactionInputSize_max": 0, "CompactionInputSize_mean": 0, "CompactionInputSize_25th_percentile": 0, "CompactionInputSize_median": 0, "CompactionInputSize_75th_percentile": 0, "CompactionInputSize_90th_percentile": 0, "CompactionInputSize_95th_percentile": 0, "CompactionInputSize_98th_percentile": 0, "CompactionInputSize_99th_percentile": 0, "CompactionInputSize_99.9th_percentile": 0, "flushedMemstoreBytes": 0, "majorCompactedOutputBytes": 0, "slowPutCount": 0, "compactedInputBytes": 0, "FlushOutputSize_num_ops": 0, "FlushOutputSize_min": 0, "FlushOutputSize_max": 0, "FlushOutputSize_mean": 0, "FlushOutputSize_25th_percentile": 0, "FlushOutputSize_median": 0, "FlushOutputSize_75th_percentile": 0, "FlushOutputSize_90th_percentile": 0, "FlushOutputSize_95th_percentile": 0, "FlushOutputSize_98th_percentile": 0, "FlushOutputSize_99th_percentile": 0, "FlushOutputSize_99.9th_percentile": 0, "PauseTimeWithGc_num_ops": 0, "PauseTimeWithGc_min": 0, "PauseTimeWithGc_max": 0, "PauseTimeWithGc_mean": 0, "PauseTimeWithGc_25th_percentile": 0, "PauseTimeWithGc_median": 0, "PauseTimeWithGc_75th_percentile": 0, "PauseTimeWithGc_90th_percentile": 0, "PauseTimeWithGc_95th_percentile": 0, "PauseTimeWithGc_98th_percentile": 0, "PauseTimeWithGc_99th_percentile": 0, "PauseTimeWithGc_99.9th_percentile": 0, "compactedOutputBytes": 0, "pauseWarnThresholdExceeded": 0, "ScanBlockBytesScanned_num_ops": 0, "ScanBlockBytesScanned_min": 0, "ScanBlockBytesScanned_max": 0, "ScanBlockBytesScanned_mean": 0, "ScanBlockBytesScanned_25th_percentile": 0, "ScanBlockBytesScanned_median": 0, "ScanBlockBytesScanned_75th_percentile": 0, "ScanBlockBytesScanned_90th_percentile": 0, "ScanBlockBytesScanned_95th_percentile": 0,
"ScanBlockBytesScanned_98th_percentile": 0, "ScanBlockBytesScanned_99th_percentile": 0, "ScanBlockBytesScanned_99.9th_percentile": 0, "Increment_num_ops": 0, "Increment_min": 0, "Increment_max": 0, "Increment_mean": 0, "Increment_25th_percentile": 0, "Increment_median": 0, "Increment_75th_percentile": 0, "Increment_90th_percentile": 0, "Increment_95th_percentile": 0, "Increment_98th_percentile": 0, "Increment_99th_percentile": 0, "Increment_99.9th_percentile": 0, "Delete_num_ops": 0, "Delete_min": 0, "Delete_max": 0, "Delete_mean": 0, "Delete_25th_percentile": 0, "Delete_median": 0, "Delete_75th_percentile": 0, "Delete_90th_percentile": 0, "Delete_95th_percentile": 0, "Delete_98th_percentile": 0, "Delete_99th_percentile": 0, "Delete_99.9th_percentile": 0, "DeleteBatch_num_ops": 0, "DeleteBatch_min": 0, "DeleteBatch_max": 0, "DeleteBatch_mean": 0, "DeleteBatch_25th_percentile": 0, "DeleteBatch_median": 0, "DeleteBatch_75th_percentile": 0, "DeleteBatch_90th_percentile": 0, "DeleteBatch_95th_percentile": 0, "DeleteBatch_98th_percentile": 0, "DeleteBatch_99th_percentile": 0, "DeleteBatch_99.9th_percentile": 0, "blockBytesScannedCount": 0, "FlushMemstoreSize_num_ops": 0, "FlushMemstoreSize_min": 0, "FlushMemstoreSize_max": 0, "FlushMemstoreSize_mean": 0, "FlushMemstoreSize_25th_percentile": 0, "FlushMemstoreSize_median": 0, "FlushMemstoreSize_75th_percentile": 0, "FlushMemstoreSize_90th_percentile": 0, "FlushMemstoreSize_95th_percentile": 0, "FlushMemstoreSize_98th_percentile": 0, "FlushMemstoreSize_99th_percentile": 0, "FlushMemstoreSize_99.9th_percentile": 0, "CompactionInputFileCount_num_ops": 0, "CompactionInputFileCount_min": 0, "CompactionInputFileCount_max": 0, "CompactionInputFileCount_mean": 0, "CompactionInputFileCount_25th_percentile": 0, "CompactionInputFileCount_median": 0, "CompactionInputFileCount_75th_percentile": 0, "CompactionInputFileCount_90th_percentile": 0, "CompactionInputFileCount_95th_percentile": 0, "CompactionInputFileCount_98th_percentile": 0, "CompactionInputFileCount_99th_percentile": 0, "CompactionInputFileCount_99.9th_percentile": 0, "CompactionTime_num_ops": 0, "CompactionTime_min": 0, "CompactionTime_max": 0, "CompactionTime_mean": 0, "CompactionTime_25th_percentile": 0, "CompactionTime_median": 0, "CompactionTime_75th_percentile": 0, "CompactionTime_90th_percentile": 0, "CompactionTime_95th_percentile": 0, "CompactionTime_98th_percentile": 0, "CompactionTime_99th_percentile": 0, "CompactionTime_99.9th_percentile": 0, "Get_num_ops": 0, "Get_min": 0, "Get_max": 0, "Get_mean": 0, "Get_25th_percentile": 0, "Get_median": 0, "Get_75th_percentile": 0, "Get_90th_percentile": 0, "Get_95th_percentile": 0, "Get_98th_percentile": 0, "Get_99th_percentile": 0, "Get_99.9th_percentile": 0, "MajorCompactionInputFileCount_num_ops": 0, "MajorCompactionInputFileCount_min": 0, "MajorCompactionInputFileCount_max": 0, "MajorCompactionInputFileCount_mean": 0, "MajorCompactionInputFileCount_25th_percentile": 0, "MajorCompactionInputFileCount_median": 0, "MajorCompactionInputFileCount_75th_percentile": 0, "MajorCompactionInputFileCount_90th_percentile": 0, "MajorCompactionInputFileCount_95th_percentile": 0, "MajorCompactionInputFileCount_98th_percentile": 0, "MajorCompactionInputFileCount_99th_percentile": 0, "MajorCompactionInputFileCount_99.9th_percentile": 0, "scannerLeaseExpiredCount": 0, "CheckAndPut_num_ops": 0, "CheckAndPut_min": 0, "CheckAndPut_max": 0, "CheckAndPut_mean": 0, "CheckAndPut_25th_percentile": 0, "CheckAndPut_median": 0, "CheckAndPut_75th_percentile": 0,
"CheckAndPut_90th_percentile": 0, "CheckAndPut_95th_percentile": 0, "CheckAndPut_98th_percentile": 0, "CheckAndPut_99th_percentile": 0, "CheckAndPut_99.9th_percentile": 0, "MajorCompactionOutputSize_num_ops": 0, "MajorCompactionOutputSize_min": 0, "MajorCompactionOutputSize_max": 0, "MajorCompactionOutputSize_mean": 0, "MajorCompactionOutputSize_25th_percentile": 0, "MajorCompactionOutputSize_median": 0, "MajorCompactionOutputSize_75th_percentile": 0, "MajorCompactionOutputSize_90th_percentile": 0, "MajorCompactionOutputSize_95th_percentile": 0, "MajorCompactionOutputSize_98th_percentile": 0, "MajorCompactionOutputSize_99th_percentile": 0, "MajorCompactionOutputSize_99.9th_percentile": 0, "CompactionOutputFileCount_num_ops": 0, "CompactionOutputFileCount_min": 0, "CompactionOutputFileCount_max": 0, "CompactionOutputFileCount_mean": 0, "CompactionOutputFileCount_25th_percentile": 0, "CompactionOutputFileCount_median": 0, "CompactionOutputFileCount_75th_percentile": 0, "CompactionOutputFileCount_90th_percentile": 0, "CompactionOutputFileCount_95th_percentile": 0, "CompactionOutputFileCount_98th_percentile": 0, "CompactionOutputFileCount_99th_percentile": 0, "CompactionOutputFileCount_99.9th_percentile": 0, "slowDeleteCount": 0, "FlushTime_num_ops": 0, "FlushTime_min": 0, "FlushTime_max": 0, "FlushTime_mean": 0, "FlushTime_25th_percentile": 0, "FlushTime_median": 0, "FlushTime_75th_percentile": 0, "FlushTime_90th_percentile": 0, "FlushTime_95th_percentile": 0, "FlushTime_98th_percentile": 0, "FlushTime_99th_percentile": 0, "FlushTime_99.9th_percentile": 0, "splitSuccessCount": 0, "MajorCompactionOutputFileCount_num_ops": 0, "MajorCompactionOutputFileCount_min": 0, "MajorCompactionOutputFileCount_max": 0, "MajorCompactionOutputFileCount_mean": 0, "MajorCompactionOutputFileCount_25th_percentile": 0, "MajorCompactionOutputFileCount_median": 0, "MajorCompactionOutputFileCount_75th_percentile": 0, "MajorCompactionOutputFileCount_90th_percentile": 0, "MajorCompactionOutputFileCount_95th_percentile": 0, "MajorCompactionOutputFileCount_98th_percentile": 0, "MajorCompactionOutputFileCount_99th_percentile": 0, "MajorCompactionOutputFileCount_99.9th_percentile": 0, "slowGetCount": 0, "ScanSize_num_ops": 0, "ScanSize_min": 0, "ScanSize_max": 0, "ScanSize_mean": 0, "ScanSize_25th_percentile": 0, "ScanSize_median": 0, "ScanSize_75th_percentile": 0, "ScanSize_90th_percentile": 0, "ScanSize_95th_percentile": 0, "ScanSize_98th_percentile": 0, "ScanSize_99th_percentile": 0, "ScanSize_99.9th_percentile": 0, "CompactionOutputSize_num_ops": 0, "CompactionOutputSize_min": 0, "CompactionOutputSize_max": 0, "CompactionOutputSize_mean": 0, "CompactionOutputSize_25th_percentile": 0, "CompactionOutputSize_median": 0, "CompactionOutputSize_75th_percentile": 0, "CompactionOutputSize_90th_percentile": 0, "CompactionOutputSize_95th_percentile": 0, "CompactionOutputSize_98th_percentile": 0, "CompactionOutputSize_99th_percentile": 0, "CompactionOutputSize_99.9th_percentile": 0, "PauseTimeWithoutGc_num_ops": 0, "PauseTimeWithoutGc_min": 0, "PauseTimeWithoutGc_max": 0, "PauseTimeWithoutGc_mean": 0, "PauseTimeWithoutGc_25th_percentile": 0, "PauseTimeWithoutGc_median": 0, "PauseTimeWithoutGc_75th_percentile": 0, "PauseTimeWithoutGc_90th_percentile": 0, "PauseTimeWithoutGc_95th_percentile": 0, "PauseTimeWithoutGc_98th_percentile": 0, "PauseTimeWithoutGc_99th_percentile": 0, "PauseTimeWithoutGc_99.9th_percentile": 0, "slowIncrementCount": 0, "Append_num_ops": 0, "Append_min": 0, "Append_max": 0, "Append_mean": 0,
"Append_25th_percentile": 0, "Append_median": 0, "Append_75th_percentile": 0, "Append_90th_percentile": 0, "Append_95th_percentile": 0, "Append_98th_percentile": 0, "Append_99th_percentile": 0, "Append_99.9th_percentile": 0, "Bulkload_count": 0, "Bulkload_mean_rate": 0.0, "Bulkload_1min_rate": 0.0, "Bulkload_5min_rate": 0.0, "Bulkload_15min_rate": 0.0, "Bulkload_num_ops": 0, "Bulkload_min": 0, "Bulkload_max": 0, "Bulkload_mean": 0, "Bulkload_25th_percentile": 0, "Bulkload_median": 0, "Bulkload_75th_percentile": 0, "Bulkload_90th_percentile": 0, "Bulkload_95th_percentile": 0, "Bulkload_98th_percentile": 0, "Bulkload_99th_percentile": 0, "Bulkload_99.9th_percentile": 0 } ] }
2024-11-10T10:32:08,753 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38215 {}] master.MasterRpcServices(700): eccb6f743e07,39345,1731234711428 reported a fatal error: ***** ABORTING region server eccb6f743e07,39345,1731234711428: testing *****
2024-11-10T10:32:08,755 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'eccb6f743e07,39345,1731234711428' *****
2024-11-10T10:32:08,756 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: testing
2024-11-10T10:32:08,756 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HeapMemoryManager(220): Stopping
2024-11-10T10:32:08,756 INFO [RS:0;eccb6f743e07:39345 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly.
2024-11-10T10:32:08,756 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-11-10T10:32:08,756 INFO [RS:0;eccb6f743e07:39345 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly.
2024-11-10T10:32:08,757 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(3091): Received CLOSE for 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:08,757 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(956): aborting server eccb6f743e07,39345,1731234711428
2024-11-10T10:32:08,757 INFO [RS:0;eccb6f743e07:39345 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-10T10:32:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44191 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Get size: 140 connection: 172.17.0.2:53476 deadline: 1731234788757, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=eccb6f743e07 port=39345 startCode=1731234711428. As of locationSeqNum=12.
2024-11-10T10:32:08,757 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 298cceebf7d48f429a5ffead936f01b2, disabling compactions & flushes
2024-11-10T10:32:08,757 INFO [RS:0;eccb6f743e07:39345 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;eccb6f743e07:39345.
2024-11-10T10:32:08,757 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:08,758 DEBUG [RS:0;eccb6f743e07:39345 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-10T10:32:08,758 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:08,758 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. after waiting 0 ms
2024-11-10T10:32:08,758 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:08,758 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,44191,1731234711534, seqNum=5 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,44191,1731234711534, seqNum=5, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=eccb6f743e07 port=39345 startCode=1731234711428. As of locationSeqNum=12.
2024-11-10T10:32:08,758 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,44191,1731234711534, seqNum=5 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=eccb6f743e07 port=39345 startCode=1731234711428. As of locationSeqNum=12.
2024-11-10T10:32:08,758 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,44191,1731234711534, seqNum=5 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=12 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=eccb6f743e07 port=39345 startCode=1731234711428. As of locationSeqNum=12.
2024-11-10T10:32:08,758 DEBUG [RS:0;eccb6f743e07:39345 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-10T10:32:08,759 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-11-10T10:32:08,759 DEBUG [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(1325): Online Regions={298cceebf7d48f429a5ffead936f01b2=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.}
2024-11-10T10:32:08,759 DEBUG [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(1351): Waiting on 298cceebf7d48f429a5ffead936f01b2
2024-11-10T10:32:08,762 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:08,762 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 298cceebf7d48f429a5ffead936f01b2: Waiting for close lock at 1731234728757Running coprocessor pre-close hooks at 1731234728757Disabling compacts and flushes for region at 1731234728757Disabling writes for close at 1731234728758 (+1 ms)Writing region close event to WAL at 1731234728762 (+4 ms)Running coprocessor post-close hooks at 1731234728762Closed at 1731234728762
2024-11-10T10:32:08,762 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.
2024-11-10T10:32:08,808 INFO [regionserver/eccb6f743e07:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-10T10:32:08,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server eccb6f743e07,39345,1731234711428 aborting
    at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:32:08,867 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=12 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=12, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server eccb6f743e07,39345,1731234711428 aborting
2024-11-10T10:32:08,867 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=12 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server eccb6f743e07,39345,1731234711428 aborting
2024-11-10T10:32:08,867 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=12 from cache
2024-11-10T10:32:08,959 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(976): stopping server eccb6f743e07,39345,1731234711428; all regions closed.
2024-11-10T10:32:08,961 INFO [regionserver/eccb6f743e07:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped
2024-11-10T10:32:08,962 INFO [regionserver/eccb6f743e07:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped
2024-11-10T10:32:08,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741833_1009 (size=1399)
2024-11-10T10:32:08,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741833_1009 (size=1399)
2024-11-10T10:32:08,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741833_1009 (size=1399)
2024-11-10T10:32:08,964 DEBUG [RS:0;eccb6f743e07:39345 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-10T10:32:08,965 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.LeaseManager(133): Closed leases
2024-11-10T10:32:08,965 INFO [RS:0;eccb6f743e07:39345 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-10T10:32:08,965 INFO [RS:0;eccb6f743e07:39345 {}] hbase.ChoreService(370): Chore service for: regionserver/eccb6f743e07:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-10T10:32:08,965 INFO [regionserver/eccb6f743e07:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-10T10:32:08,965 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-10T10:32:08,965 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-10T10:32:08,965 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-10T10:32:08,965 INFO [RS:0;eccb6f743e07:39345 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-10T10:32:08,966 INFO [RS:0;eccb6f743e07:39345 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39345
2024-11-10T10:32:08,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/eccb6f743e07,39345,1731234711428
2024-11-10T10:32:08,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-10T10:32:08,973 INFO [RS:0;eccb6f743e07:39345 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-10T10:32:08,976 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [eccb6f743e07,39345,1731234711428]
2024-11-10T10:32:08,978 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/eccb6f743e07,39345,1731234711428 already deleted, retry=false
2024-11-10T10:32:08,978 INFO [RegionServerTracker-0 {}] master.ServerManager(695): Processing expiration of eccb6f743e07,39345,1731234711428 on eccb6f743e07,38215,1731234710743
2024-11-10T10:32:08,985 DEBUG [RegionServerTracker-0 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure eccb6f743e07,39345,1731234711428, splitWal=true, meta=false
2024-11-10T10:32:08,987 INFO [PEWorker-3 {}] procedure.ServerCrashProcedure(169): Start pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure eccb6f743e07,39345,1731234711428, splitWal=true, meta=false
2024-11-10T10:32:08,987 INFO [RegionServerTracker-0 {}] assignment.AssignmentManager(1991): Scheduled ServerCrashProcedure pid=13 for eccb6f743e07,39345,1731234711428 (carryingMeta=false) eccb6f743e07,39345,1731234711428/CRASHED/regionCount=1/lock=java.util.concurrent.locks.ReentrantReadWriteLock@4c4d71fe[Write locks = 1, Read locks = 0], oldState=ONLINE.
2024-11-10T10:32:08,990 INFO [PEWorker-3 {}] procedure.ServerCrashProcedure(207): eccb6f743e07,39345,1731234711428 had 1 regions
2024-11-10T10:32:08,991 INFO [PEWorker-3 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure eccb6f743e07,39345,1731234711428, splitWal=true, meta=false, isMeta: false
2024-11-10T10:32:08,993 DEBUG [PEWorker-3 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting
2024-11-10T10:32:08,994 INFO [PEWorker-3 {}] master.SplitWALManager(105): eccb6f743e07,39345,1731234711428 WAL count=1, meta=false
2024-11-10T10:32:08,997 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure eccb6f743e07%2C39345%2C1731234711428.1731234713068}]
2024-11-10T10:32:09,012 DEBUG [PEWorker-1 {}] master.SplitWALManager(158): Acquired split WAL worker=eccb6f743e07,39867,1731234711581
2024-11-10T10:32:09,016 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure eccb6f743e07%2C39345%2C1731234711428.1731234713068, worker=eccb6f743e07,39867,1731234711581}]
2024-11-10T10:32:09,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T10:32:09,077 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39345-0x101016f2c550001, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T10:32:09,077 INFO [RS:0;eccb6f743e07:39345 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-10T10:32:09,077 INFO [RS:0;eccb6f743e07:39345 {}] regionserver.HRegionServer(1031): Exiting; stopping=eccb6f743e07,39345,1731234711428; zookeeper connection closed.
2024-11-10T10:32:09,078 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@345e7c99 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@345e7c99
2024-11-10T10:32:09,079 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=18]
2024-11-10T10:32:09,081 WARN [RPCClient-NioEventLoopGroup-6-5 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server eccb6f743e07:39345
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: eccb6f743e07/172.17.0.2:39345
Caused by: java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:336) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:339) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:776) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?]
    at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:32:09,082 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=18, error=java.net.ConnectException: Call to address=eccb6f743e07:39345 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: eccb6f743e07/172.17.0.2:39345
2024-11-10T10:32:09,082 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=18 is java.net.ConnectException: Connection refused
2024-11-10T10:32:09,082 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=18 from cache
2024-11-10T10:32:09,082 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.FailedServers(52): Added failed server with address eccb6f743e07:39345 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: eccb6f743e07/172.17.0.2:39345
2024-11-10T10:32:09,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39867 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=15
2024-11-10T10:32:09,206 INFO [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068, size=1.4 K (1399bytes)
2024-11-10T10:32:09,207 INFO [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068
2024-11-10T10:32:09,207 INFO [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068 after 0ms
2024-11-10T10:32:09,210 DEBUG [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:32:09,211 INFO [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(310): Open hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068 took 5ms
2024-11-10T10:32:09,218 DEBUG [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(352): Last flushed sequenceid for 298cceebf7d48f429a5ffead936f01b2: last_flushed_sequence_id: 12 store_sequence_id { family_name: "cf1" sequence_id: 12 } store_sequence_id { family_name: "cf2" sequence_id: 12 }
2024-11-10T10:32:09,219 DEBUG [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068 so closing down
2024-11-10T10:32:09,219 DEBUG [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(125): Waiting for split writer threads to finish
2024-11-10T10:32:09,219 INFO [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(145): 3 split writer threads finished
2024-11-10T10:32:09,219 INFO [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(425): Processed 6 edits across 0 Regions in 8 ms; skipped=6; WAL=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068, size=1.4 K, length=1399, corrupted=false, cancelled=false
2024-11-10T10:32:09,219 DEBUG [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068, journal: Splitting hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068, size=1.4 K (1399bytes) at 1731234729206Finishing writing output for hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068 so closing down at 1731234729219 (+13 ms)3 split writer threads finished at 1731234729219Processed 6 edits across 0 Regions in 8 ms; skipped=6; WAL=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068, size=1.4 K, length=1399, corrupted=false, cancelled=false at 1731234729219
2024-11-10T10:32:09,219 DEBUG [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068
2024-11-10T10:32:09,221 DEBUG [RS_LOG_REPLAY_OPS-regionserver/eccb6f743e07:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15
2024-11-10T10:32:09,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38215 {}] master.HMaster(4169): Remote procedure done, pid=15
2024-11-10T10:32:09,229 INFO [PEWorker-4 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting/eccb6f743e07%2C39345%2C1731234711428.1731234713068 to hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/oldWALs
2024-11-10T10:32:09,232 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14
2024-11-10T10:32:09,232 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure eccb6f743e07%2C39345%2C1731234711428.1731234713068, worker=eccb6f743e07,39867,1731234711581 in 214 msec
2024-11-10T10:32:09,233 DEBUG [PEWorker-5 {}] master.SplitWALManager(172): Release split WAL worker=eccb6f743e07,39867,1731234711581
2024-11-10T10:32:09,237 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-10T10:32:09,237 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure eccb6f743e07%2C39345%2C1731234711428.1731234713068, worker=eccb6f743e07,39867,1731234711581 in 238 msec
2024-11-10T10:32:09,239 INFO [PEWorker-3 {}] master.SplitLogManager(171): hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting dir is empty, no logs to split.
2024-11-10T10:32:09,239 INFO [PEWorker-3 {}] master.SplitWALManager(105): eccb6f743e07,39345,1731234711428 WAL count=0, meta=false
2024-11-10T10:32:09,239 DEBUG [PEWorker-3 {}] procedure.ServerCrashProcedure(329): Check if eccb6f743e07,39345,1731234711428 WAL splitting is done? wals=0, meta=false
2024-11-10T10:32:09,241 INFO [PEWorker-3 {}] procedure.ServerCrashProcedure(321): Remove WAL directory for eccb6f743e07,39345,1731234711428 failed, ignore...File hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/WALs/eccb6f743e07,39345,1731234711428-splitting does not exist.
2024-11-10T10:32:09,243 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, ASSIGN}] 2024-11-10T10:32:09,245 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, ASSIGN 2024-11-10T10:32:09,246 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-11-10T10:32:09,389 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=18] 2024-11-10T10:32:09,389 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.AbstractRpcClient(357): Not trying to connect to eccb6f743e07:39345 this server is in the failed servers list 2024-11-10T10:32:09,390 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=eccb6f743e07:39345 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: eccb6f743e07:39345 2024-11-10T10:32:09,390 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: eccb6f743e07:39345 2024-11-10T10:32:09,390 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,39345,1731234711428, seqNum=18 from cache 2024-11-10T10:32:09,396 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(204): Hosts are {eccb6f743e07=0} racks are {/default-rack=0} 2024-11-10T10:32:09,396 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-10T10:32:09,396 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-10T10:32:09,396 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-10T10:32:09,396 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-10T10:32:09,396 
INFO [eccb6f743e07:38215 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-10T10:32:09,396 INFO [eccb6f743e07:38215 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-10T10:32:09,396 DEBUG [eccb6f743e07:38215 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-10T10:32:09,397 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=OPENING, regionLocation=eccb6f743e07,44191,1731234711534 2024-11-10T10:32:09,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, ASSIGN because future has completed 2024-11-10T10:32:09,400 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,44191,1731234711534}] 2024-11-10T10:32:09,557 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:09,557 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 298cceebf7d48f429a5ffead936f01b2, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:09,558 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,558 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:09,558 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7794): checking encryption for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,558 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(7797): checking classloading for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,560 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,561 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major 
jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf1 2024-11-10T10:32:09,561 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:09,569 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf1/2cab767fb9d441f8a3a74d2fa53c3db1 2024-11-10T10:32:09,569 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:09,569 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,570 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 298cceebf7d48f429a5ffead936f01b2 columnFamilyName cf2 2024-11-10T10:32:09,570 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:09,577 DEBUG [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/cf2/4ff27b3fb6aa4b449446fe9cca522f8a 2024-11-10T10:32:09,577 INFO [StoreOpener-298cceebf7d48f429a5ffead936f01b2-1 {}] regionserver.HStore(327): Store=298cceebf7d48f429a5ffead936f01b2/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:09,577 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1038): replaying wal for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,578 DEBUG 
[RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,579 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,580 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1048): stopping wal replay for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,580 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,581 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-10T10:32:09,583 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1093): writing seq id for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,584 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1114): Opened 298cceebf7d48f429a5ffead936f01b2; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67307006, jitterRate=0.0029525458812713623}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-10T10:32:09,584 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:09,585 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegion(1006): Region open journal for 298cceebf7d48f429a5ffead936f01b2: Running coprocessor pre-open hook at 1731234729558Writing region info on filesystem at 1731234729558Initializing all the Stores at 1731234729559 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234729559Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234729559Cleaning up temporary data from old regions at 1731234729580 (+21 ms)Running coprocessor post-open hooks at 1731234729584 (+4 ms)Region opened successfully at 1731234729585 (+1 ms) 
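
Annotation: the region is now reopened on eccb6f743e07,44191, but the client entries earlier in this section still held a cached location pointing at the dead eccb6f743e07,39345 (hence the FailedServerException and the "Try removing ... from cache" lines). The fetched-location line further below shows the refreshed entry with seqNum=18. A hedged sketch of forcing that same refresh from application code, assuming an already-configured Connection `conn`; reload=true bypasses the client cache and re-reads hbase:meta, which is what AsyncNonMetaRegionLocator does automatically after the failure.

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class LocatorRefreshSketch {
      static void printLocation(Connection conn) throws java.io.IOException {
        try (RegionLocator locator = conn.getRegionLocator(
            TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
          // reload=true skips the cached entry and consults hbase:meta directly.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("r1"), true);
          // Once the ASSIGN above completes, this reports the new server and seqNum=18.
          System.out.println(loc.getHostname() + ":" + loc.getPort()
              + " seqNum=" + loc.getSeqNum());
        }
      }
    }
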
2024-11-10T10:32:09,586 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., pid=17, masterSystemTime=1731234729552 2024-11-10T10:32:09,590 DEBUG [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:09,590 INFO [RS_OPEN_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_OPEN_REGION, pid=17}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:09,591 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=298cceebf7d48f429a5ffead936f01b2, regionState=OPEN, openSeqNum=18, regionLocation=eccb6f743e07,44191,1731234711534 2024-11-10T10:32:09,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,44191,1731234711534 because future has completed 2024-11-10T10:32:09,598 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16 2024-11-10T10:32:09,598 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 298cceebf7d48f429a5ffead936f01b2, server=eccb6f743e07,44191,1731234711534 in 194 msec 2024-11-10T10:32:09,601 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=13 2024-11-10T10:32:09,601 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=298cceebf7d48f429a5ffead936f01b2, ASSIGN in 355 msec 2024-11-10T10:32:09,601 INFO [PEWorker-4 {}] procedure.ServerCrashProcedure(291): removed crashed server eccb6f743e07,39345,1731234711428 after splitting done 2024-11-10T10:32:09,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; ServerCrashProcedure eccb6f743e07,39345,1731234711428, splitWal=true, meta=false in 622 msec 2024-11-10T10:32:09,907 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2., hostname=eccb6f743e07,44191,1731234711534, seqNum=18] 2024-11-10T10:32:09,923 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=411 (was 411), OpenFileDescriptor=1051 (was 1023) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=215 (was 216), ProcessCount=11 (was 11), AvailableMemoryMB=8742 (was 8798) 2024-11-10T10:32:09,925 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1051 is superior to 1024 2024-11-10T10:32:09,941 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=411, OpenFileDescriptor=1051, MaxFileDescriptor=1048576, SystemLoadAverage=215, ProcessCount=11, AvailableMemoryMB=8741 2024-11-10T10:32:09,941 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1051 is superior to 1024 2024-11-10T10:32:09,958 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:32:09,960 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:32:09,961 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T10:32:09,963 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-18215475, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/hregion-18215475, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:09,977 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/hregion-18215475/hregion-18215475.1731234729963, exclude list is [], retry=0 2024-11-10T10:32:09,979 DEBUG [AsyncFSWAL-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:09,980 DEBUG [AsyncFSWAL-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:09,980 DEBUG [AsyncFSWAL-20-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:09,983 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-18215475/hregion-18215475.1731234729963 2024-11-10T10:32:09,983 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697)] 2024-11-10T10:32:09,983 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 210087fa18961ee3be23a75be48b02fa, NAME => 'testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39939/hbase 2024-11-10T10:32:09,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741893_1071 (size=67) 2024-11-10T10:32:09,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741893_1071 (size=67) 2024-11-10T10:32:09,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741893_1071 (size=67) 2024-11-10T10:32:09,993 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:09,994 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:09,995 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 210087fa18961ee3be23a75be48b02fa columnFamilyName a 2024-11-10T10:32:09,996 DEBUG [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:09,996 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(327): Store=210087fa18961ee3be23a75be48b02fa/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:09,996 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:09,997 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 210087fa18961ee3be23a75be48b02fa columnFamilyName b 2024-11-10T10:32:09,997 DEBUG [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:09,998 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(327): Store=210087fa18961ee3be23a75be48b02fa/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:09,998 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:09,999 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 210087fa18961ee3be23a75be48b02fa columnFamilyName c 2024-11-10T10:32:09,999 DEBUG [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:10,000 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(327): Store=210087fa18961ee3be23a75be48b02fa/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:10,000 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,000 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,001 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,002 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,002 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,002 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-10T10:32:10,003 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,005 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T10:32:10,006 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 210087fa18961ee3be23a75be48b02fa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64821159, jitterRate=-0.03408946096897125}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-10T10:32:10,007 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 210087fa18961ee3be23a75be48b02fa: Writing region info on filesystem at 1731234729993Initializing all the Stores at 1731234729994 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234729994Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234729994Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234729994Cleaning up temporary data from old regions at 1731234730002 (+8 ms)Region opened successfully at 1731234730007 (+5 ms) 2024-11-10T10:32:10,007 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 210087fa18961ee3be23a75be48b02fa, disabling compactions & flushes 2024-11-10T10:32:10,007 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa. 2024-11-10T10:32:10,007 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa. 2024-11-10T10:32:10,007 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa. 
after waiting 0 ms 2024-11-10T10:32:10,007 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa. 2024-11-10T10:32:10,007 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa. 2024-11-10T10:32:10,007 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 210087fa18961ee3be23a75be48b02fa: Waiting for close lock at 1731234730007Disabling compacts and flushes for region at 1731234730007Disabling writes for close at 1731234730007Writing region close event to WAL at 1731234730007Closed at 1731234730007 2024-11-10T10:32:10,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741892_1070 (size=95) 2024-11-10T10:32:10,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741892_1070 (size=95) 2024-11-10T10:32:10,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741892_1070 (size=95) 2024-11-10T10:32:10,012 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-10T10:32:10,012 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-18215475:(num 1731234729963) 2024-11-10T10:32:10,012 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-10T10:32:10,013 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:10,027 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014, exclude list is [], retry=0 2024-11-10T10:32:10,029 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:10,030 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:10,030 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:10,032 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014 2024-11-10T10:32:10,032 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:37657:37657)] 2024-11-10T10:32:10,032 DEBUG [Time-limited test {}] regionserver.HRegion(7752): 
Opening region: {ENCODED => 210087fa18961ee3be23a75be48b02fa, NAME => 'testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:10,032 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:10,032 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,032 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,034 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,034 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 210087fa18961ee3be23a75be48b02fa columnFamilyName a 2024-11-10T10:32:10,035 DEBUG [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:10,035 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(327): Store=210087fa18961ee3be23a75be48b02fa/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:10,035 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,036 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
210087fa18961ee3be23a75be48b02fa columnFamilyName b 2024-11-10T10:32:10,036 DEBUG [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:10,036 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(327): Store=210087fa18961ee3be23a75be48b02fa/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:10,036 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,037 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 210087fa18961ee3be23a75be48b02fa columnFamilyName c 2024-11-10T10:32:10,037 DEBUG [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:10,038 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(327): Store=210087fa18961ee3be23a75be48b02fa/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:10,038 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,038 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,039 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,040 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,040 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,041 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
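
Annotation: the "(42.7 M)" fallback logged just above is simple arithmetic: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, FlushLargeStoresPolicy divides the 128 MB memstore flush size by the number of families, 134217728 / 3 = 44739242 bytes (~42.7 MB), which matches the flushSizeLowerBound=44739242 printed at the next region-open line. (The earlier two-family region in this section got 134217728 / 2 = 67108864, i.e. the "(64.0 M)" / flushSizeLowerBound=67108864 pair.) A minimal sketch of setting the bound explicitly in the table descriptor, using the property name taken verbatim from the log; the 16 MB value is an arbitrary illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class FlushBoundSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testReplayEditsWrittenViaHRegion"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("a"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("b"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("c"))
            // Explicit per-family flush lower bound in bytes; without it HBase
            // falls back to memstore flush size / #families, as the DEBUG line notes.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", "16777216")
            .build();
      }
    }
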
2024-11-10T10:32:10,042 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,043 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 210087fa18961ee3be23a75be48b02fa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72031158, jitterRate=0.07334789633750916}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-10T10:32:10,043 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 210087fa18961ee3be23a75be48b02fa: Writing region info on filesystem at 1731234730033Initializing all the Stores at 1731234730033Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730033Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730034 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730034Cleaning up temporary data from old regions at 1731234730040 (+6 ms)Region opened successfully at 1731234730043 (+3 ms) 2024-11-10T10:32:10,067 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 210087fa18961ee3be23a75be48b02fa 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-11-10T10:32:10,083 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/a/4e348204ca104e9f96b4aa8037eb589d is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1731234730044/Put/seqid=0 2024-11-10T10:32:10,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741895_1073 (size=5958) 2024-11-10T10:32:10,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741895_1073 (size=5958) 2024-11-10T10:32:10,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741895_1073 (size=5958) 2024-11-10T10:32:10,091 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/a/4e348204ca104e9f96b4aa8037eb589d 2024-11-10T10:32:10,111 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/b/94050b9cca60466fbcb9be81ef37bf66 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1731234730051/Put/seqid=0 2024-11-10T10:32:10,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741896_1074 (size=5958) 2024-11-10T10:32:10,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741896_1074 (size=5958) 2024-11-10T10:32:10,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741896_1074 (size=5958) 2024-11-10T10:32:10,120 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/b/94050b9cca60466fbcb9be81ef37bf66 2024-11-10T10:32:10,148 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/c/fd69f37ab3ff4a46b9de3c392a5aa328 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1731234730058/Put/seqid=0 2024-11-10T10:32:10,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741897_1075 (size=5958) 2024-11-10T10:32:10,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741897_1075 (size=5958) 2024-11-10T10:32:10,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741897_1075 (size=5958) 2024-11-10T10:32:10,156 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/c/fd69f37ab3ff4a46b9de3c392a5aa328 2024-11-10T10:32:10,162 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/a/4e348204ca104e9f96b4aa8037eb589d as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/a/4e348204ca104e9f96b4aa8037eb589d 2024-11-10T10:32:10,168 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/a/4e348204ca104e9f96b4aa8037eb589d, entries=10, sequenceid=33, filesize=5.8 K 2024-11-10T10:32:10,169 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/b/94050b9cca60466fbcb9be81ef37bf66 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/b/94050b9cca60466fbcb9be81ef37bf66 2024-11-10T10:32:10,175 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/b/94050b9cca60466fbcb9be81ef37bf66, entries=10, sequenceid=33, filesize=5.8 K 2024-11-10T10:32:10,176 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/c/fd69f37ab3ff4a46b9de3c392a5aa328 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/c/fd69f37ab3ff4a46b9de3c392a5aa328 2024-11-10T10:32:10,182 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/c/fd69f37ab3ff4a46b9de3c392a5aa328, entries=10, sequenceid=33, filesize=5.8 K 2024-11-10T10:32:10,183 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 210087fa18961ee3be23a75be48b02fa in 116ms, sequenceid=33, compaction requested=false 2024-11-10T10:32:10,183 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 210087fa18961ee3be23a75be48b02fa: 2024-11-10T10:32:10,183 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 210087fa18961ee3be23a75be48b02fa, disabling compactions & flushes 2024-11-10T10:32:10,183 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa. 2024-11-10T10:32:10,183 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa. 2024-11-10T10:32:10,183 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa. after waiting 0 ms 2024-11-10T10:32:10,183 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa. 2024-11-10T10:32:10,185 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa. 
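
Annotation: the flush that just completed is two-phase, and the log shows both phases: each family's memstore is first written to a temporary HFile under the region's .tmp directory (the "Len of the biggest cell in ... /.tmp/a/..." lines), then atomically renamed into the family directory (the "Committing ... as ..." lines), so readers never see a partial file. In this test the standalone HRegion is flushed directly in-process; against a deployed table the equivalent client-side trigger is Admin.flush, sketched below under the assumption of a live cluster Connection `conn`.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class FlushSketch {
      static void flushTable(Connection conn) throws java.io.IOException {
        try (Admin admin = conn.getAdmin()) {
          // Drives the same memstore -> .tmp HFile -> commit-by-rename path
          // seen in the records above, once per column family.
          admin.flush(TableName.valueOf("testReplayEditsWrittenViaHRegion"));
        }
      }
    }
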
2024-11-10T10:32:10,185 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 210087fa18961ee3be23a75be48b02fa: Waiting for close lock at 1731234730183Disabling compacts and flushes for region at 1731234730183Disabling writes for close at 1731234730183Writing region close event to WAL at 1731234730185 (+2 ms)Closed at 1731234730185 2024-11-10T10:32:10,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741894_1072 (size=3385) 2024-11-10T10:32:10,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741894_1072 (size=3385) 2024-11-10T10:32:10,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741894_1072 (size=3385) 2024-11-10T10:32:10,193 DEBUG [Time-limited test {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/b/94050b9cca60466fbcb9be81ef37bf66 to hdfs://localhost:39939/hbase/archive/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/b/94050b9cca60466fbcb9be81ef37bf66 2024-11-10T10:32:10,210 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014, size=3.3 K (3385bytes) 2024-11-10T10:32:10,210 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014 2024-11-10T10:32:10,211 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014 after 1ms 2024-11-10T10:32:10,213 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:32:10,213 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014 took 3ms 2024-11-10T10:32:10,215 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014 so closing down 2024-11-10T10:32:10,215 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-10T10:32:10,216 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731234730014.temp 2024-11-10T10:32:10,217 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000003-wal.1731234730014.temp 2024-11-10T10:32:10,218 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-10T10:32:10,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44741 is added to blk_1073741898_1076 (size=2944) 2024-11-10T10:32:10,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741898_1076 (size=2944) 2024-11-10T10:32:10,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741898_1076 (size=2944) 2024-11-10T10:32:10,225 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000003-wal.1731234730014.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-11-10T10:32:10,227 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000003-wal.1731234730014.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000032 2024-11-10T10:32:10,227 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 14 ms; skipped=2; WAL=hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014, size=3.3 K, length=3385, corrupted=false, cancelled=false 2024-11-10T10:32:10,227 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014, journal: Splitting hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014, size=3.3 K (3385bytes) at 1731234730210Finishing writing output for hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014 so closing down at 1731234730215 (+5 ms)Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000003-wal.1731234730014.temp at 1731234730217 (+2 ms)3 split writer threads finished at 1731234730218 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000003-wal.1731234730014.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1731234730225 (+7 ms)Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000003-wal.1731234730014.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000032 at 1731234730227 (+2 ms)Processed 32 edits across 1 Regions in 14 ms; skipped=2; WAL=hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014, size=3.3 K, length=3385, corrupted=false, cancelled=false at 1731234730227 2024-11-10T10:32:10,228 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730014 to hdfs://localhost:39939/hbase/oldWALs/wal.1731234730014 2024-11-10T10:32:10,229 INFO [Time-limited test {}] 
wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000032 2024-11-10T10:32:10,229 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-10T10:32:10,231 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:10,245 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231, exclude list is [], retry=0 2024-11-10T10:32:10,247 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:10,248 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:10,248 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:10,249 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 2024-11-10T10:32:10,250 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657)] 2024-11-10T10:32:10,250 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 210087fa18961ee3be23a75be48b02fa, NAME => 'testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:10,250 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:10,250 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,250 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,251 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,252 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 210087fa18961ee3be23a75be48b02fa columnFamilyName a 2024-11-10T10:32:10,252 DEBUG [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:10,257 DEBUG [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/a/4e348204ca104e9f96b4aa8037eb589d 2024-11-10T10:32:10,257 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(327): Store=210087fa18961ee3be23a75be48b02fa/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:10,257 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,258 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 210087fa18961ee3be23a75be48b02fa columnFamilyName b 2024-11-10T10:32:10,258 DEBUG [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:10,259 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(327): Store=210087fa18961ee3be23a75be48b02fa/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:10,259 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,259 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 210087fa18961ee3be23a75be48b02fa columnFamilyName c 2024-11-10T10:32:10,259 DEBUG [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:10,264 DEBUG [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/c/fd69f37ab3ff4a46b9de3c392a5aa328 2024-11-10T10:32:10,264 INFO [StoreOpener-210087fa18961ee3be23a75be48b02fa-1 {}] regionserver.HStore(327): Store=210087fa18961ee3be23a75be48b02fa/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:10,265 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,265 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,266 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,267 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000032 2024-11-10T10:32:10,269 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000032: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:32:10,270 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32, path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000032 2024-11-10T10:32:10,270 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 210087fa18961ee3be23a75be48b02fa 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-11-10T10:32:10,285 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/b/8032890f88764d4e8d0747f7cb74d5e8 is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1731234730051/Put/seqid=0 2024-11-10T10:32:10,291 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741900_1078 (size=5958) 2024-11-10T10:32:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741900_1078 (size=5958) 2024-11-10T10:32:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741900_1078 (size=5958) 2024-11-10T10:32:10,292 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/b/8032890f88764d4e8d0747f7cb74d5e8 2024-11-10T10:32:10,298 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/.tmp/b/8032890f88764d4e8d0747f7cb74d5e8 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/b/8032890f88764d4e8d0747f7cb74d5e8 2024-11-10T10:32:10,303 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/b/8032890f88764d4e8d0747f7cb74d5e8, entries=10, sequenceid=32, filesize=5.8 K 2024-11-10T10:32:10,303 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 210087fa18961ee3be23a75be48b02fa in 33ms, sequenceid=32, compaction requested=false; wal=null 2024-11-10T10:32:10,304 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/0000000000000000032 2024-11-10T10:32:10,305 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,305 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,306 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
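[Editor's note] The entries above show the whole recovery path for region 210087fa18961ee3be23a75be48b02fa: the splitter turned the old WAL into recovered.edits/0000000000000000032, the reopened region replayed it ("Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32"), flushed the recovered 870 B to family b, and deleted the recovered-edits file. The sketch below is a hedged simplification of that accounting, not HBase's actual HRegion code: an edit is skipped when its sequence id is not above the max sequence id already flushed for its column family (families a and c had been flushed before the "crash"), and applied otherwise. All names (Edit, maxFlushedSeqIdByFamily, replay) are illustrative.

import java.util.List;
import java.util.Map;

final class RecoveredEditsReplaySketch {

  /** One WAL edit: a hypothetical simplification of HBase's WALEdit/Cell pair. */
  record Edit(long seqId, String family) {}

  /** Returns the number of edits applied; skipped edits are already durable in HFiles. */
  static int replay(List<Edit> edits, Map<String, Long> maxFlushedSeqIdByFamily) {
    int applied = 0, skipped = 0;
    long firstSeqId = -1, maxSeqId = -1;
    for (Edit e : edits) {
      if (firstSeqId < 0) firstSeqId = e.seqId();
      maxSeqId = Math.max(maxSeqId, e.seqId());
      long flushed = maxFlushedSeqIdByFamily.getOrDefault(e.family(), -1L);
      if (e.seqId() <= flushed) {
        skipped++;   // e.g. the 20 edits for families a and c, flushed before the crash
      } else {
        applied++;   // e.g. the 10 edits for family b, re-added to the memstore
      }
    }
    System.out.printf("Applied %d, skipped %d, firstSequenceIdInLog=%d, maxSequenceIdInLog=%d%n",
        applied, skipped, firstSeqId, maxSeqId);
    return applied;
  }
}

After a successful replay the region flushes the recovered data, deletes the recovered.edits file, and (per the "Wrote file=.../33.seqid, newMaxSeqId=33" entry below) records the new max sequence id so the next open starts cleanly at sequenceid=34.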
2024-11-10T10:32:10,307 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 210087fa18961ee3be23a75be48b02fa 2024-11-10T10:32:10,309 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/210087fa18961ee3be23a75be48b02fa/recovered.edits/33.seqid, newMaxSeqId=33, maxSeqId=1 2024-11-10T10:32:10,310 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 210087fa18961ee3be23a75be48b02fa; next sequenceid=34; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59140456, jitterRate=-0.11873853206634521}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-10T10:32:10,311 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 210087fa18961ee3be23a75be48b02fa: Writing region info on filesystem at 1731234730250Initializing all the Stores at 1731234730251 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730251Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730251Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730251Obtaining lock to block concurrent updates at 1731234730270 (+19 ms)Preparing flush snapshotting stores in 210087fa18961ee3be23a75be48b02fa at 1731234730270Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa., syncing WAL and waiting on mvcc, flushsize=dataSize=870, getHeapSize=2320, getOffHeapSize=0, getCellsCount=10 at 1731234730270Flushing stores of testReplayEditsWrittenViaHRegion,,1731234729959.210087fa18961ee3be23a75be48b02fa. 
at 1731234730270Flushing 210087fa18961ee3be23a75be48b02fa/b: creating writer at 1731234730270Flushing 210087fa18961ee3be23a75be48b02fa/b: appending metadata at 1731234730284 (+14 ms)Flushing 210087fa18961ee3be23a75be48b02fa/b: closing flushed file at 1731234730284Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@137aeb57: reopening flushed file at 1731234730298 (+14 ms)Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 210087fa18961ee3be23a75be48b02fa in 33ms, sequenceid=32, compaction requested=false; wal=null at 1731234730304 (+6 ms)Cleaning up temporary data from old regions at 1731234730305 (+1 ms)Region opened successfully at 1731234730311 (+6 ms) 2024-11-10T10:32:10,334 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=420 (was 411) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:43140 [Waiting for operation #40] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:50218 [Waiting for operation #29] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:54926 [Waiting for operation #33] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741899_1077, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:50362 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:55072 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:43284 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741899_1077] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1127 (was 1051) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=215 (was 215), ProcessCount=11 (was 11), AvailableMemoryMB=8731 (was 8741) 2024-11-10T10:32:10,335 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1127 is superior to 1024 2024-11-10T10:32:10,349 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=420, OpenFileDescriptor=1127, MaxFileDescriptor=1048576, SystemLoadAverage=215, ProcessCount=11, AvailableMemoryMB=8730 2024-11-10T10:32:10,350 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1127 is superior to 1024 2024-11-10T10:32:10,370 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:32:10,373 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:32:10,373 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T10:32:10,376 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-24768029, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/hregion-24768029, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:10,394 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/hregion-24768029/hregion-24768029.1731234730377, exclude list is [], retry=0 2024-11-10T10:32:10,397 DEBUG [AsyncFSWAL-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:10,398 DEBUG [AsyncFSWAL-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:10,401 DEBUG [AsyncFSWAL-22-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:10,411 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-24768029/hregion-24768029.1731234730377 2024-11-10T10:32:10,412 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983)] 2024-11-10T10:32:10,412 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 1a0d1631be9e13223d348d31267e69bf, NAME => 'testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterAbortingFlush', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39939/hbase 2024-11-10T10:32:10,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741902_1080 (size=68) 2024-11-10T10:32:10,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741902_1080 (size=68) 2024-11-10T10:32:10,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741902_1080 (size=68) 2024-11-10T10:32:10,432 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:10,433 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,435 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a0d1631be9e13223d348d31267e69bf columnFamilyName a 2024-11-10T10:32:10,435 DEBUG [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:10,435 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(327): Store=1a0d1631be9e13223d348d31267e69bf/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:10,435 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,437 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a0d1631be9e13223d348d31267e69bf columnFamilyName b 2024-11-10T10:32:10,437 DEBUG [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:10,437 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(327): Store=1a0d1631be9e13223d348d31267e69bf/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:10,437 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,438 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a0d1631be9e13223d348d31267e69bf columnFamilyName c 2024-11-10T10:32:10,438 DEBUG [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:10,439 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(327): Store=1a0d1631be9e13223d348d31267e69bf/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:10,439 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,440 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,440 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,442 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,442 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,443 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-10T10:32:10,444 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,446 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T10:32:10,447 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1a0d1631be9e13223d348d31267e69bf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59030148, jitterRate=-0.12038224935531616}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-10T10:32:10,448 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1a0d1631be9e13223d348d31267e69bf: Writing region info on filesystem at 1731234730432Initializing all the Stores at 1731234730433 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730433Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730433Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730433Cleaning up temporary data from old regions at 1731234730442 (+9 ms)Region opened successfully at 1731234730447 (+5 ms) 2024-11-10T10:32:10,448 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 1a0d1631be9e13223d348d31267e69bf, disabling compactions & flushes 2024-11-10T10:32:10,448 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf. 2024-11-10T10:32:10,448 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf. 2024-11-10T10:32:10,448 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf. 
after waiting 0 ms 2024-11-10T10:32:10,448 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf. 2024-11-10T10:32:10,448 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf. 2024-11-10T10:32:10,448 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 1a0d1631be9e13223d348d31267e69bf: Waiting for close lock at 1731234730448Disabling compacts and flushes for region at 1731234730448Disabling writes for close at 1731234730448Writing region close event to WAL at 1731234730448Closed at 1731234730448 2024-11-10T10:32:10,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741901_1079 (size=95) 2024-11-10T10:32:10,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741901_1079 (size=95) 2024-11-10T10:32:10,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741901_1079 (size=95) 2024-11-10T10:32:10,456 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-10T10:32:10,456 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-24768029:(num 1731234730377) 2024-11-10T10:32:10,456 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-10T10:32:10,458 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:10,472 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459, exclude list is [], retry=0 2024-11-10T10:32:10,475 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:10,475 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:10,475 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:10,477 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459 2024-11-10T10:32:10,477 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:38697:38697)] 2024-11-10T10:32:10,532 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 1a0d1631be9e13223d348d31267e69bf, NAME => 'testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:10,535 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,535 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:10,535 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,535 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,537 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,538 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a0d1631be9e13223d348d31267e69bf columnFamilyName a 2024-11-10T10:32:10,538 DEBUG [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:10,539 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(327): Store=1a0d1631be9e13223d348d31267e69bf/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:10,539 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:10,540 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a0d1631be9e13223d348d31267e69bf columnFamilyName b
2024-11-10T10:32:10,540 DEBUG [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:10,541 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(327): Store=1a0d1631be9e13223d348d31267e69bf/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:10,541 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:10,542 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a0d1631be9e13223d348d31267e69bf columnFamilyName c
2024-11-10T10:32:10,542 DEBUG [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:10,542 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(327): Store=1a0d1631be9e13223d348d31267e69bf/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:10,542 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:10,543 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:10,544 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:10,545 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:10,545 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:10,546 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead.
2024-11-10T10:32:10,547 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:10,548 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1a0d1631be9e13223d348d31267e69bf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73391126, jitterRate=0.09361299872398376}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242}
2024-11-10T10:32:10,548 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:10,548 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1a0d1631be9e13223d348d31267e69bf: Running coprocessor pre-open hook at 1731234730535Writing region info on filesystem at 1731234730535Initializing all the Stores at 1731234730536 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730537 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730537Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234730537Cleaning up temporary data from old regions at 1731234730545 (+8 ms)Running coprocessor post-open hooks at 1731234730548 (+3 ms)Region opened successfully at 1731234730548
2024-11-10T10:32:10,559 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1a0d1631be9e13223d348d31267e69bf 3/3 column families, dataSize=590 B heapSize=2.08 KB
2024-11-10T10:32:10,560 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1a0d1631be9e13223d348d31267e69bf/a, retrying num=0
java.io.IOException: Simulated exception by tests
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
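[Editor's note] The "Simulated exception by tests" above is thrown by the test's own flusher; the top stack frame points at AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot. A minimal, self-contained sketch of that failure-injection pattern follows; the SnapshotFlusher interface and its signature here are illustrative stand-ins, not the real HBase StoreFlusher API.

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical stand-in for the store-flush hook; the real HBase API differs.
interface SnapshotFlusher {
    List<String> flushSnapshot(Iterable<String> snapshotCells) throws IOException;
}

class CustomStoreFlusher implements SnapshotFlusher {
    // Test-controlled switch: while set, every flush attempt fails.
    static final AtomicBoolean throwExceptionWhenFlushing = new AtomicBoolean(false);

    @Override
    public List<String> flushSnapshot(Iterable<String> snapshotCells) throws IOException {
        if (throwExceptionWhenFlushing.get()) {
            // Same message as in the log; the store's retry loop keeps
            // hitting this until the test clears the switch.
            throw new IOException("Simulated exception by tests");
        }
        // Pretend the snapshot was written to a single store file.
        return List.of("storefile-1");
    }
}

The test presumably arms the switch before calling flush and clears it once the expected WARN/retry cycle has been observed, which is consistent with the retries that follow.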
2024-11-10T10:32:11,162 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion
2024-11-10T10:32:11,162 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion Metrics about Tables on a single HBase RegionServer
2024-11-10T10:32:11,163 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum
2024-11-10T10:32:11,163 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum Metrics about Tables on a single HBase RegionServer
2024-11-10T10:32:11,164 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF
2024-11-10T10:32:11,164 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF Metrics about Tables on a single HBase RegionServer
2024-11-10T10:32:11,165 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly
2024-11-10T10:32:11,165 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly Metrics about Tables on a single HBase RegionServer
2024-11-10T10:32:11,165 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush
2024-11-10T10:32:11,165 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush Metrics about Tables on a single HBase RegionServer
2024-11-10T10:32:11,561 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1a0d1631be9e13223d348d31267e69bf/a, retrying num=1
java.io.IOException: Simulated exception by tests
2024-11-10T10:32:12,425 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-10T10:32:12,562 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1a0d1631be9e13223d348d31267e69bf/a, retrying num=2
java.io.IOException: Simulated exception by tests
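[Editor's note] The FlushLargeStoresPolicy entry in the region-open sequence above derives its per-family lower bound from the region memstore flush size divided by the number of column families. Assuming the default hbase.hregion.memstore.flush.size of 128 MB (134217728 bytes), the logged numbers are consistent:

\[
\left\lfloor \frac{134217728}{3} \right\rfloor = 44739242 \text{ bytes} \approx 42.7\ \text{MB}
\]

which matches both the "(42.7 M)" in the policy message and FlushLargeStoresPolicy{flushSizeLowerBound=44739242} in the Opened entry.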
2024-11-10T10:32:13,563 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1a0d1631be9e13223d348d31267e69bf/a, retrying num=3
java.io.IOException: Simulated exception by tests
2024-11-10T10:32:14,563 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1a0d1631be9e13223d348d31267e69bf/a, retrying num=4
java.io.IOException: Simulated exception by tests
2024-11-10T10:32:15,564 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1a0d1631be9e13223d348d31267e69bf/a, retrying num=5
java.io.IOException: Simulated exception by tests
2024-11-10T10:32:16,565 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1a0d1631be9e13223d348d31267e69bf/a, retrying num=6
java.io.IOException: Simulated exception by tests
2024-11-10T10:32:17,565 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1a0d1631be9e13223d348d31267e69bf/a, retrying num=7
java.io.IOException: Simulated exception by tests
2024-11-10T10:32:18,566 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1a0d1631be9e13223d348d31267e69bf/a, retrying num=8
java.io.IOException: Simulated exception by tests
2024-11-10T10:32:19,567 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for 1a0d1631be9e13223d348d31267e69bf/a, retrying num=9
java.io.IOException: Simulated exception by tests
2024-11-10T10:32:19,568 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1a0d1631be9e13223d348d31267e69bf:
2024-11-10T10:32:19,568 INFO [Time-limited test {}] wal.AbstractTestWALReplay(671): Expected simulated exception when flushing region, region: testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.
2024-11-10T10:32:19,582 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1a0d1631be9e13223d348d31267e69bf:
2024-11-10T10:32:19,582 INFO [Time-limited test {}] wal.AbstractTestWALReplay(691): Expected exception when flushing region because server is stopped,Aborting flush because server is aborted...
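[Editor's note] The ten WARN entries above ("retrying num=0" through "num=9", roughly one second apart) suggest a bounded retry loop around the store flush, after which the failure is surfaced to the caller and the flush status journal records it. The sketch below shows that pattern; the attempt count, pause, and class name are inferred from the timestamps, not taken from HStore's actual implementation.

import java.io.IOException;
import java.util.concurrent.Callable;

final class FlushRetrier {
    private static final int MAX_ATTEMPTS = 10;  // inferred from num=0..9
    private static final long PAUSE_MS = 1000L;  // inferred from the 1 s spacing

    // Runs the flush, retrying on IOException up to MAX_ATTEMPTS times,
    // then rethrows the last failure to the caller.
    static <T> T runWithRetries(Callable<T> flush) throws IOException {
        IOException last = null;
        for (int attempt = 0; attempt < MAX_ATTEMPTS; attempt++) {
            try {
                return flush.call();
            } catch (IOException e) {
                last = e;
                System.err.printf("Failed flushing store file, retrying num=%d%n", attempt);
                try {
                    Thread.sleep(PAUSE_MS);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    break;
                }
            } catch (Exception e) {
                throw new IOException(e);
            }
        }
        throw last != null ? last : new IOException("flush failed");
    }
}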
2024-11-10T10:32:19,582 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 1a0d1631be9e13223d348d31267e69bf, disabling compactions & flushes
2024-11-10T10:32:19,582 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.
2024-11-10T10:32:19,582 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.
2024-11-10T10:32:19,582 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf. after waiting 0 ms
2024-11-10T10:32:19,582 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.
2024-11-10T10:32:19,583 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1190 in region testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.
2024-11-10T10:32:19,583 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.
2024-11-10T10:32:19,583 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 1a0d1631be9e13223d348d31267e69bf: Waiting for close lock at 1731234739582Running coprocessor pre-close hooks at 1731234739582Disabling compacts and flushes for region at 1731234739582Disabling writes for close at 1731234739582Writing region close event to WAL at 1731234739582Running coprocessor post-close hooks at 1731234739583 (+1 ms)Closed at 1731234739583
2024-11-10T10:32:19,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741903_1081 (size=2688)
2024-11-10T10:32:19,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741903_1081 (size=2688)
2024-11-10T10:32:19,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741903_1081 (size=2688)
2024-11-10T10:32:19,602 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459, size=2.6 K (2688bytes)
2024-11-10T10:32:19,602 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459
2024-11-10T10:32:19,602 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459 after 0ms
2024-11-10T10:32:19,605 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:32:19,605 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459 took 3ms
2024-11-10T10:32:19,607 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459 so closing down
2024-11-10T10:32:19,607 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish
2024-11-10T10:32:19,608 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-wal.1731234730459.temp
2024-11-10T10:32:19,610 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000004-wal.1731234730459.temp
2024-11-10T10:32:19,610 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished
2024-11-10T10:32:19,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741904_1082 (size=2094)
2024-11-10T10:32:19,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741904_1082 (size=2094)
2024-11-10T10:32:19,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741904_1082 (size=2094)
2024-11-10T10:32:19,621 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000004-wal.1731234730459.temp (wrote 20 edits, skipped 0 edits in 0 ms)
2024-11-10T10:32:19,623 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000004-wal.1731234730459.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000026
2024-11-10T10:32:19,623 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 23 edits across 1 Regions in 18 ms; skipped=3; WAL=hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459, size=2.6 K, length=2688, corrupted=false, cancelled=false
2024-11-10T10:32:19,623 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459, journal: Splitting hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459, size=2.6 K (2688bytes) at 1731234739602Finishing writing output for hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459 so closing down at 1731234739607 (+5 ms)Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000004-wal.1731234730459.temp at 1731234739610 (+3 ms)3 split writer threads finished at 1731234739610Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000004-wal.1731234730459.temp (wrote 20 edits, skipped 0 edits in 0 ms) at 1731234739621 (+11 ms)Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000004-wal.1731234730459.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000026 at 1731234739623 (+2 ms)Processed 23 edits across 1 Regions in 18 ms; skipped=3; WAL=hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459, size=2.6 K, length=2688, corrupted=false, cancelled=false at 1731234739623
2024-11-10T10:32:19,625 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234730459 to hdfs://localhost:39939/hbase/oldWALs/wal.1731234730459
2024-11-10T10:32:19,626 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000026
2024-11-10T10:32:19,626 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor
2024-11-10T10:32:19,628 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
2024-11-10T10:32:19,642 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234739628, exclude list is [], retry=0
2024-11-10T10:32:19,644 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:32:19,644 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:32:19,645 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:32:19,646 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731234730369/wal.1731234739628
2024-11-10T10:32:19,646 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:38697:38697)]
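[Editor's note] The split output above shows the recovered-edits naming scheme: the writer creates a temp file named after the first sequence id it encounters (0000000000000000004-wal.1731234730459.temp) and renames it to the highest sequence id it wrote (0000000000000000026), zero-padded to 19 digits. The helper below only reproduces that formatting for illustration; it is not WALSplitter's real code.

// Illustrative helper reproducing the 19-digit zero-padded sequence-id names.
final class RecoveredEditsNames {
    static String formatSeqId(long seqId) {
        return String.format("%019d", seqId); // 4 -> "0000000000000000004"
    }

    static String tempName(long firstSeqId, String walName) {
        return formatSeqId(firstSeqId) + "-" + walName + ".temp";
    }

    static String finalName(long maxSeqId) {
        return formatSeqId(maxSeqId); // 26 -> "0000000000000000026"
    }

    public static void main(String[] args) {
        // Matches the create/rename pair in the log above.
        System.out.println(tempName(4, "wal.1731234730459")); // 0000000000000000004-wal.1731234730459.temp
        System.out.println(finalName(26));                    // 0000000000000000026
    }
}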
2024-11-10T10:32:19,647 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1a0d1631be9e13223d348d31267e69bf, NAME => 'testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.', STARTKEY => '', ENDKEY => ''}
2024-11-10T10:32:19,647 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:19,648 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:32:19,648 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:19,648 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:19,650 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:19,651 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a0d1631be9e13223d348d31267e69bf columnFamilyName a
2024-11-10T10:32:19,651 DEBUG [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:19,651 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(327): Store=1a0d1631be9e13223d348d31267e69bf/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:19,652 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:19,652 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a0d1631be9e13223d348d31267e69bf columnFamilyName b
2024-11-10T10:32:19,652 DEBUG [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:19,653 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(327): Store=1a0d1631be9e13223d348d31267e69bf/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:19,653 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:19,654 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a0d1631be9e13223d348d31267e69bf columnFamilyName c
2024-11-10T10:32:19,654 DEBUG [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:19,654 INFO [StoreOpener-1a0d1631be9e13223d348d31267e69bf-1 {}] regionserver.HStore(327): Store=1a0d1631be9e13223d348d31267e69bf/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:19,654 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:19,655 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:19,656 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf
2024-11-10T10:32:19,657 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000026
2024-11-10T10:32:19,659 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000026: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:32:19,661 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26, path=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000026
2024-11-10T10:32:19,661 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1a0d1631be9e13223d348d31267e69bf 3/3 column families, dataSize=1.16 KB heapSize=3.41 KB
2024-11-10T10:32:19,684 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/.tmp/a/1fc1594597764d85b0e0752079de4746 is 64, key is testReplayEditsAfterAbortingFlush12/a:q/1731234739574/Put/seqid=0
2024-11-10T10:32:19,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741906_1084 (size=5523)
2024-11-10T10:32:19,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741906_1084 (size=5523)
2024-11-10T10:32:19,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741906_1084 (size=5523)
2024-11-10T10:32:19,899 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:32:20,092 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=416 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/.tmp/a/1fc1594597764d85b0e0752079de4746
2024-11-10T10:32:20,117 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/.tmp/b/de5ab17ff6ca4648b8803749d82babef is 64, key is testReplayEditsAfterAbortingFlush10/b:q/1731234739569/Put/seqid=0
2024-11-10T10:32:20,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741907_1085 (size=5524)
2024-11-10T10:32:20,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741907_1085 (size=5524)
2024-11-10T10:32:20,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741907_1085 (size=5524)
2024-11-10T10:32:20,124 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=417 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/.tmp/b/de5ab17ff6ca4648b8803749d82babef
2024-11-10T10:32:20,143 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/.tmp/c/4ebc8b07c62b4a3e8d6a86bf710a467f is 64, key is testReplayEditsAfterAbortingFlush11/c:q/1731234739571/Put/seqid=0
2024-11-10T10:32:20,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock:
127.0.0.1:44251 is added to blk_1073741908_1086 (size=5457) 2024-11-10T10:32:20,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741908_1086 (size=5457) 2024-11-10T10:32:20,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741908_1086 (size=5457) 2024-11-10T10:32:20,150 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=357 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/.tmp/c/4ebc8b07c62b4a3e8d6a86bf710a467f 2024-11-10T10:32:20,155 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/.tmp/a/1fc1594597764d85b0e0752079de4746 as hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/a/1fc1594597764d85b0e0752079de4746 2024-11-10T10:32:20,160 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/a/1fc1594597764d85b0e0752079de4746, entries=7, sequenceid=26, filesize=5.4 K 2024-11-10T10:32:20,161 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/.tmp/b/de5ab17ff6ca4648b8803749d82babef as hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/b/de5ab17ff6ca4648b8803749d82babef 2024-11-10T10:32:20,166 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/b/de5ab17ff6ca4648b8803749d82babef, entries=7, sequenceid=26, filesize=5.4 K 2024-11-10T10:32:20,167 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/.tmp/c/4ebc8b07c62b4a3e8d6a86bf710a467f as hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/c/4ebc8b07c62b4a3e8d6a86bf710a467f 2024-11-10T10:32:20,171 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/c/4ebc8b07c62b4a3e8d6a86bf710a467f, entries=6, sequenceid=26, filesize=5.3 K 2024-11-10T10:32:20,172 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 1a0d1631be9e13223d348d31267e69bf in 510ms, sequenceid=26, compaction requested=false; wal=null 2024-11-10T10:32:20,172 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/0000000000000000026 2024-11-10T10:32:20,173 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:20,173 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 
1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:20,174 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-10T10:32:20,175 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:20,177 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsAfterAbortingFlush/1a0d1631be9e13223d348d31267e69bf/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=1 2024-11-10T10:32:20,178 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 1a0d1631be9e13223d348d31267e69bf; next sequenceid=27; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74807303, jitterRate=0.11471568048000336}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-10T10:32:20,178 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1a0d1631be9e13223d348d31267e69bf 2024-11-10T10:32:20,179 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 1a0d1631be9e13223d348d31267e69bf:
Running coprocessor pre-open hook at 1731234739648
Writing region info on filesystem at 1731234739648
Initializing all the Stores at 1731234739649 (+1 ms)
Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234739649
Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234739649
Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234739649
Obtaining lock to block concurrent updates at 1731234739661 (+12 ms)
Preparing flush snapshotting stores in 1a0d1631be9e13223d348d31267e69bf at 1731234739661
Finished memstore snapshotting testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf., syncing WAL and waiting on mvcc, flushsize=dataSize=1190, getHeapSize=3440, getOffHeapSize=0, getCellsCount=20 at 1731234739662 (+1 ms)
Flushing stores of testReplayEditsAfterAbortingFlush,,1731234730371.1a0d1631be9e13223d348d31267e69bf. at 1731234739662
Flushing 1a0d1631be9e13223d348d31267e69bf/a: creating writer at 1731234739662
Flushing 1a0d1631be9e13223d348d31267e69bf/a: appending metadata at 1731234739683 (+21 ms)
Flushing 1a0d1631be9e13223d348d31267e69bf/a: closing flushed file at 1731234739684 (+1 ms)
Flushing 1a0d1631be9e13223d348d31267e69bf/b: creating writer at 1731234740099 (+415 ms)
Flushing 1a0d1631be9e13223d348d31267e69bf/b: appending metadata at 1731234740117 (+18 ms)
Flushing 1a0d1631be9e13223d348d31267e69bf/b: closing flushed file at 1731234740117
Flushing 1a0d1631be9e13223d348d31267e69bf/c: creating writer at 1731234740129 (+12 ms)
Flushing 1a0d1631be9e13223d348d31267e69bf/c: appending metadata at 1731234740143 (+14 ms)
Flushing 1a0d1631be9e13223d348d31267e69bf/c: closing flushed file at 1731234740143
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5af04083: reopening flushed file at 1731234740155 (+12 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28af58cf: reopening flushed file at 1731234740160 (+5 ms)
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ca88d61: reopening flushed file at 1731234740166 (+6 ms)
Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for 1a0d1631be9e13223d348d31267e69bf in 510ms, sequenceid=26, compaction requested=false; wal=null at 1731234740172 (+6 ms)
Cleaning up temporary data from old regions at 1731234740173 (+1 ms)
Running coprocessor post-open hooks at 1731234740178 (+5 ms)
Region opened successfully at 1731234740179 (+1 ms)
2024-11-10T10:32:20,202 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=417 (was 420), OpenFileDescriptor=1183 (was 1127) - OpenFileDescriptor LEAK?
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=190 (was 215), ProcessCount=11 (was 11), AvailableMemoryMB=8719 (was 8730) 2024-11-10T10:32:20,202 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1183 is superior to 1024 2024-11-10T10:32:20,215 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=417, OpenFileDescriptor=1183, MaxFileDescriptor=1048576, SystemLoadAverage=190, ProcessCount=11, AvailableMemoryMB=8718 2024-11-10T10:32:20,215 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1183 is superior to 1024 2024-11-10T10:32:20,230 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:32:20,232 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:32:20,233 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T10:32:20,235 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-04446415, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/hregion-04446415, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:20,251 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/hregion-04446415/hregion-04446415.1731234740235, exclude list is [], retry=0 2024-11-10T10:32:20,253 DEBUG [AsyncFSWAL-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:20,254 DEBUG [AsyncFSWAL-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:20,254 DEBUG [AsyncFSWAL-24-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:20,256 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-04446415/hregion-04446415.1731234740235 2024-11-10T10:32:20,256 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983)] 2024-11-10T10:32:20,256 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 25ce6d54d29c6ca636e580da0da36ec0, NAME => 'testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testDatalossWhenInputError', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39939/hbase 2024-11-10T10:32:20,264 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741910_1088 (size=61) 2024-11-10T10:32:20,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741910_1088 (size=61) 2024-11-10T10:32:20,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741910_1088 (size=61) 2024-11-10T10:32:20,265 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:20,266 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,267 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 25ce6d54d29c6ca636e580da0da36ec0 columnFamilyName a 2024-11-10T10:32:20,267 DEBUG [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:20,268 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] regionserver.HStore(327): Store=25ce6d54d29c6ca636e580da0da36ec0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:20,268 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,268 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,269 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,269 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,269 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,270 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,272 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T10:32:20,273 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 25ce6d54d29c6ca636e580da0da36ec0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68312697, jitterRate=0.017938509583473206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T10:32:20,273 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 25ce6d54d29c6ca636e580da0da36ec0:
Writing region info on filesystem at 1731234740265
Initializing all the Stores at 1731234740265
Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234740266 (+1 ms)
Cleaning up temporary data from old regions at 1731234740269 (+3 ms)
Region opened successfully at 1731234740273 (+4 ms)
2024-11-10T10:32:20,273 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 25ce6d54d29c6ca636e580da0da36ec0, disabling compactions & flushes 2024-11-10T10:32:20,273 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0. 2024-11-10T10:32:20,274 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0. 2024-11-10T10:32:20,274 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0. after waiting 0 ms 2024-11-10T10:32:20,274 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0. 2024-11-10T10:32:20,274 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0.
2024-11-10T10:32:20,274 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 25ce6d54d29c6ca636e580da0da36ec0:
Waiting for close lock at 1731234740273
Disabling compacts and flushes for region at 1731234740273
Disabling writes for close at 1731234740274 (+1 ms)
Writing region close event to WAL at 1731234740274
Closed at 1731234740274
2024-11-10T10:32:20,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741909_1087 (size=95) 2024-11-10T10:32:20,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741909_1087 (size=95) 2024-11-10T10:32:20,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741909_1087 (size=95) 2024-11-10T10:32:20,278 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-10T10:32:20,278 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-04446415:(num 1731234740235) 2024-11-10T10:32:20,279 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-10T10:32:20,280 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:20,293 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280, exclude list is [], retry=0 2024-11-10T10:32:20,295 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:20,296 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:20,296 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:20,298 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280 2024-11-10T10:32:20,298 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983)] 2024-11-10T10:32:20,298 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 25ce6d54d29c6ca636e580da0da36ec0, NAME => 'testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:20,298 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0.; StoreHotnessProtector,
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:20,298 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,298 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,300 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,301 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 25ce6d54d29c6ca636e580da0da36ec0 columnFamilyName a 2024-11-10T10:32:20,301 DEBUG [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:20,301 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] regionserver.HStore(327): Store=25ce6d54d29c6ca636e580da0da36ec0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:20,302 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,302 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,303 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,304 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,304 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,306 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,307 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 25ce6d54d29c6ca636e580da0da36ec0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72694679, jitterRate=0.08323512971401215}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T10:32:20,307 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 
25ce6d54d29c6ca636e580da0da36ec0:
Writing region info on filesystem at 1731234740299
Initializing all the Stores at 1731234740299
Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234740299
Cleaning up temporary data from old regions at 1731234740304 (+5 ms)
Region opened successfully at 1731234740307 (+3 ms)
2024-11-10T10:32:20,318 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 25ce6d54d29c6ca636e580da0da36ec0, disabling compactions & flushes 2024-11-10T10:32:20,318 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0. 2024-11-10T10:32:20,318 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0. 2024-11-10T10:32:20,318 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0. after waiting 0 ms 2024-11-10T10:32:20,318 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0. 2024-11-10T10:32:20,319 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 750 in region testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0. 2024-11-10T10:32:20,319 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0.
2024-11-10T10:32:20,319 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 25ce6d54d29c6ca636e580da0da36ec0:
Waiting for close lock at 1731234740318
Disabling compacts and flushes for region at 1731234740318
Disabling writes for close at 1731234740318
Writing region close event to WAL at 1731234740319 (+1 ms)
Closed at 1731234740319
2024-11-10T10:32:20,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741911_1089 (size=1050) 2024-11-10T10:32:20,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741911_1089 (size=1050) 2024-11-10T10:32:20,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741911_1089 (size=1050) 2024-11-10T10:32:20,349 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280, size=1.0 K (1050bytes) 2024-11-10T10:32:20,349 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280 2024-11-10T10:32:20,349 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280 after 0ms 2024-11-10T10:32:20,352 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:32:20,352 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280 took 3ms 2024-11-10T10:32:20,354 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280 so closing down 2024-11-10T10:32:20,354 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-10T10:32:20,355 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731234740280.temp 2024-11-10T10:32:20,356 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000003-wal.1731234740280.temp 2024-11-10T10:32:20,357 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-10T10:32:20,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741912_1090 (size=1050) 2024-11-10T10:32:20,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741912_1090 (size=1050) 2024-11-10T10:32:20,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741912_1090 (size=1050) 2024-11-10T10:32:20,364 INFO
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000003-wal.1731234740280.temp (wrote 10 edits, skipped 0 edits in 0 ms) 2024-11-10T10:32:20,365 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000003-wal.1731234740280.temp to hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000012 2024-11-10T10:32:20,365 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 10 edits across 1 Regions in 12 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280, size=1.0 K, length=1050, corrupted=false, cancelled=false 2024-11-10T10:32:20,365 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280, journal:
Splitting hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280, size=1.0 K (1050bytes) at 1731234740349
Finishing writing output for hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280 so closing down at 1731234740354 (+5 ms)
Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000003-wal.1731234740280.temp at 1731234740356 (+2 ms)
3 split writer threads finished at 1731234740357 (+1 ms)
Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000003-wal.1731234740280.temp (wrote 10 edits, skipped 0 edits in 0 ms) at 1731234740364 (+7 ms)
Rename recovered edits hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000003-wal.1731234740280.temp to hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000012 at 1731234740365 (+1 ms)
Processed 10 edits across 1 Regions in 12 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280, size=1.0 K, length=1050, corrupted=false, cancelled=false at 1731234740365
2024-11-10T10:32:20,367 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740280 to hdfs://localhost:39939/hbase/oldWALs/wal.1731234740280 2024-11-10T10:32:20,368 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000012 2024-11-10T10:32:20,371 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true,
valueCompressionType=GZ 2024-11-10T10:32:20,737 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-10T10:32:20,740 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:20,753 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740740, exclude list is [], retry=0 2024-11-10T10:32:20,756 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:20,756 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:20,756 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:20,758 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1731234740229/wal.1731234740740 2024-11-10T10:32:20,758 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:41983:41983)] 2024-11-10T10:32:20,758 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 25ce6d54d29c6ca636e580da0da36ec0, NAME => 'testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:20,758 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:20,759 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,759 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,762 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,764 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 25ce6d54d29c6ca636e580da0da36ec0 columnFamilyName a 2024-11-10T10:32:20,764 DEBUG [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:20,765 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] regionserver.HStore(327): Store=25ce6d54d29c6ca636e580da0da36ec0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:20,766 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,766 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,767 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,768 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000012 2024-11-10T10:32:20,770 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:32:20,771 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12, path=hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000012 2024-11-10T10:32:20,771 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 25ce6d54d29c6ca636e580da0da36ec0 1/1 column families, dataSize=750 B heapSize=1.73 KB 2024-11-10T10:32:20,789 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/.tmp/a/a327ea777fe04159a8b034deabc8c422 is 79, key is testDatalossWhenInputError/a:x0/1731234740307/Put/seqid=0 2024-11-10T10:32:20,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741914_1092 (size=5808) 2024-11-10T10:32:20,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741914_1092 (size=5808) 2024-11-10T10:32:20,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741914_1092 (size=5808) 2024-11-10T10:32:20,799 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750 B at 
sequenceid=12 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/.tmp/a/a327ea777fe04159a8b034deabc8c422 2024-11-10T10:32:20,811 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/.tmp/a/a327ea777fe04159a8b034deabc8c422 as hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/a/a327ea777fe04159a8b034deabc8c422 2024-11-10T10:32:20,819 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/a/a327ea777fe04159a8b034deabc8c422, entries=10, sequenceid=12, filesize=5.7 K 2024-11-10T10:32:20,819 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 25ce6d54d29c6ca636e580da0da36ec0 in 48ms, sequenceid=12, compaction requested=false; wal=null 2024-11-10T10:32:20,820 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/0000000000000000012 2024-11-10T10:32:20,821 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,821 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,824 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,826 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-11-10T10:32:20,827 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 25ce6d54d29c6ca636e580da0da36ec0; next sequenceid=13; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69517894, jitterRate=0.03589734435081482}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T10:32:20,828 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 25ce6d54d29c6ca636e580da0da36ec0:
Writing region info on filesystem at 1731234740759
Initializing all the Stores at 1731234740762 (+3 ms)
Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234740762
Obtaining lock to block concurrent updates at 1731234740771 (+9 ms)
Preparing flush snapshotting stores in 25ce6d54d29c6ca636e580da0da36ec0 at 1731234740771
Finished memstore snapshotting testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0., syncing WAL and waiting on mvcc, flushsize=dataSize=750, getHeapSize=1760, getOffHeapSize=0, getCellsCount=10 at 1731234740772 (+1 ms)
Flushing stores of testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0. at 1731234740772
Flushing 25ce6d54d29c6ca636e580da0da36ec0/a: creating writer at 1731234740772
Flushing 25ce6d54d29c6ca636e580da0da36ec0/a: appending metadata at 1731234740788 (+16 ms)
Flushing 25ce6d54d29c6ca636e580da0da36ec0/a: closing flushed file at 1731234740788
Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@393f2270: reopening flushed file at 1731234740809 (+21 ms)
Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 25ce6d54d29c6ca636e580da0da36ec0 in 48ms, sequenceid=12, compaction requested=false; wal=null at 1731234740819 (+10 ms)
Cleaning up temporary data from old regions at 1731234740821 (+2 ms)
Region opened successfully at 1731234740828 (+7 ms)
2024-11-10T10:32:20,831 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 25ce6d54d29c6ca636e580da0da36ec0, NAME => 'testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:20,832 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731234740231.25ce6d54d29c6ca636e580da0da36ec0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:20,832 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,832 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,833 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,834 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 25ce6d54d29c6ca636e580da0da36ec0 columnFamilyName a 2024-11-10T10:32:20,834 DEBUG [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:20,839 DEBUG [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/a/a327ea777fe04159a8b034deabc8c422 2024-11-10T10:32:20,840 INFO [StoreOpener-25ce6d54d29c6ca636e580da0da36ec0-1 {}] regionserver.HStore(327): Store=25ce6d54d29c6ca636e580da0da36ec0/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:20,840 DEBUG [Time-limited test {}]
regionserver.HRegion(1038): replaying wal for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,840 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,842 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,842 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,842 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,844 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 25ce6d54d29c6ca636e580da0da36ec0 2024-11-10T10:32:20,846 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testDatalossWhenInputError/25ce6d54d29c6ca636e580da0da36ec0/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=12 2024-11-10T10:32:20,847 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 25ce6d54d29c6ca636e580da0da36ec0; next sequenceid=14; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63011735, jitterRate=-0.061051979660987854}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-10T10:32:20,847 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 25ce6d54d29c6ca636e580da0da36ec0:
Writing region info on filesystem at 1731234740832
Initializing all the Stores at 1731234740833 (+1 ms)
Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234740833
Cleaning up temporary data from old regions at 1731234740842 (+9 ms)
Region opened successfully at 1731234740847 (+5 ms)
2024-11-10T10:32:20,866 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=427 (was 417) Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-2
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:53422 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:44878 [Waiting for operation #19] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:56540 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:48488 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:42524 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:42696 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1264 (was 1183) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=190 (was 190), ProcessCount=11 (was 11), AvailableMemoryMB=8710 (was 8718) 2024-11-10T10:32:20,866 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1264 is superior to 1024 2024-11-10T10:32:20,879 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=427, OpenFileDescriptor=1264, MaxFileDescriptor=1048576, SystemLoadAverage=190, ProcessCount=11, AvailableMemoryMB=8707 2024-11-10T10:32:20,879 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1264 is superior to 1024 2024-11-10T10:32:20,894 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:32:20,896 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:32:20,896 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T10:32:20,898 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-94422723, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/hregion-94422723, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:20,911 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/hregion-94422723/hregion-94422723.1731234740899, exclude list is [], retry=0 2024-11-10T10:32:20,914 DEBUG [AsyncFSWAL-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:20,914 DEBUG [AsyncFSWAL-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:20,914 DEBUG [AsyncFSWAL-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:20,917 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-94422723/hregion-94422723.1731234740899 2024-11-10T10:32:20,919 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:37657:37657)] 2024-11-10T10:32:20,919 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 6a7880317438693e0b317caf967f3a0e, NAME => 'testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testCompactedBulkLoadedFiles', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39939/hbase 2024-11-10T10:32:20,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741916_1094 (size=63) 2024-11-10T10:32:20,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741916_1094 (size=63) 2024-11-10T10:32:20,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741916_1094 (size=63) 2024-11-10T10:32:20,931 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:20,933 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,935 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a7880317438693e0b317caf967f3a0e columnFamilyName a 2024-11-10T10:32:20,935 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:20,935 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(327): Store=6a7880317438693e0b317caf967f3a0e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:20,935 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,937 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a7880317438693e0b317caf967f3a0e columnFamilyName b 2024-11-10T10:32:20,937 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:20,938 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(327): Store=6a7880317438693e0b317caf967f3a0e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:20,938 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,939 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a7880317438693e0b317caf967f3a0e columnFamilyName c 2024-11-10T10:32:20,939 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:20,940 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(327): Store=6a7880317438693e0b317caf967f3a0e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:20,940 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,941 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,941 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,942 DEBUG [Time-limited 
test {}] regionserver.HRegion(1048): stopping wal replay for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,942 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,943 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-10T10:32:20,944 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,947 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T10:32:20,947 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 6a7880317438693e0b317caf967f3a0e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64229214, jitterRate=-0.0429101288318634}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-10T10:32:20,948 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 6a7880317438693e0b317caf967f3a0e: Writing region info on filesystem at 1731234740932Initializing all the Stores at 1731234740932Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234740933 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234740933Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234740933Cleaning up temporary data from old regions at 1731234740942 (+9 ms)Region opened successfully at 1731234740948 (+6 ms) 2024-11-10T10:32:20,948 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 6a7880317438693e0b317caf967f3a0e, disabling compactions & flushes 2024-11-10T10:32:20,948 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e. 2024-11-10T10:32:20,948 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e. 2024-11-10T10:32:20,948 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e. 
after waiting 0 ms 2024-11-10T10:32:20,948 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e. 2024-11-10T10:32:20,949 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e. 2024-11-10T10:32:20,949 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 6a7880317438693e0b317caf967f3a0e: Waiting for close lock at 1731234740948Disabling compacts and flushes for region at 1731234740948Disabling writes for close at 1731234740948Writing region close event to WAL at 1731234740949 (+1 ms)Closed at 1731234740949 2024-11-10T10:32:20,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741915_1093 (size=95) 2024-11-10T10:32:20,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741915_1093 (size=95) 2024-11-10T10:32:20,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741915_1093 (size=95) 2024-11-10T10:32:20,954 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-10T10:32:20,954 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-94422723:(num 1731234740899) 2024-11-10T10:32:20,954 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-10T10:32:20,957 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:20,978 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957, exclude list is [], retry=0 2024-11-10T10:32:20,981 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:20,981 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:20,981 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:20,983 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957 2024-11-10T10:32:20,984 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657)] 2024-11-10T10:32:20,984 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: 
{ENCODED => 6a7880317438693e0b317caf967f3a0e, NAME => 'testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:20,984 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:20,984 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,984 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,986 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,987 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a7880317438693e0b317caf967f3a0e columnFamilyName a 2024-11-10T10:32:20,987 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:20,987 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(327): Store=6a7880317438693e0b317caf967f3a0e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:20,987 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,988 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
6a7880317438693e0b317caf967f3a0e columnFamilyName b 2024-11-10T10:32:20,988 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:20,989 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(327): Store=6a7880317438693e0b317caf967f3a0e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:20,989 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,990 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a7880317438693e0b317caf967f3a0e columnFamilyName c 2024-11-10T10:32:20,990 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:20,990 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(327): Store=6a7880317438693e0b317caf967f3a0e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:20,990 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,991 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,992 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,993 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,993 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,994 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
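The FlushLargeStoresPolicy fallback logged just above ("42.7 M") is plain arithmetic. A minimal sketch, assuming the default 128 MB region memstore flush size (the flush size itself is not printed in this log); it reproduces the flushSizeLowerBound=44739242 that appears in the next "Opened" record:

long memstoreFlushSize = 128L * 1024 * 1024; // assumed default region flush size: 134217728 bytes
int numberOfFamilies = 3;                    // column families 'a', 'b' and 'c' in this test table
long lowerBound = memstoreFlushSize / numberOfFamilies;
System.out.println(lowerBound);              // 44739242 bytes, i.e. the "42.7 M" in the log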
2024-11-10T10:32:20,995 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:20,996 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 6a7880317438693e0b317caf967f3a0e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73035185, jitterRate=0.08830906450748444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-10T10:32:20,997 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 6a7880317438693e0b317caf967f3a0e: Writing region info on filesystem at 1731234740984Initializing all the Stores at 1731234740985 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234740985Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234740986 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234740986Cleaning up temporary data from old regions at 1731234740993 (+7 ms)Region opened successfully at 1731234740997 (+4 ms) 2024-11-10T10:32:21,001 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile0 is 32, key is 000/a:a/1731234741001/Put/seqid=0 2024-11-10T10:32:21,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741918_1096 (size=4875) 2024-11-10T10:32:21,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741918_1096 (size=4875) 2024-11-10T10:32:21,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741918_1096 (size=4875) 2024-11-10T10:32:21,017 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile1 is 32, key is 100/a:a/1731234741017/Put/seqid=0 2024-11-10T10:32:21,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741919_1097 (size=4875) 2024-11-10T10:32:21,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741919_1097 (size=4875) 2024-11-10T10:32:21,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741919_1097 (size=4875) 2024-11-10T10:32:21,029 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of 
the biggest cell in hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile2 is 32, key is 200/a:a/1731234741029/Put/seqid=0 2024-11-10T10:32:21,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741920_1098 (size=4875) 2024-11-10T10:32:21,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741920_1098 (size=4875) 2024-11-10T10:32:21,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741920_1098 (size=4875) 2024-11-10T10:32:21,041 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile0 for inclusion in 6a7880317438693e0b317caf967f3a0e/a 2024-11-10T10:32:21,045 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=000 last=050 2024-11-10T10:32:21,045 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-10T10:32:21,045 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile1 for inclusion in 6a7880317438693e0b317caf967f3a0e/a 2024-11-10T10:32:21,049 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=100 last=150 2024-11-10T10:32:21,049 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-10T10:32:21,049 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile2 for inclusion in 6a7880317438693e0b317caf967f3a0e/a 2024-11-10T10:32:21,052 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=200 last=250 2024-11-10T10:32:21,052 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-10T10:32:21,052 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 6a7880317438693e0b317caf967f3a0e 3/3 column families, dataSize=51 B heapSize=896 B 2024-11-10T10:32:21,071 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/.tmp/a/d0ae04ebf4ac4cba9e75264d43606dd9 is 55, key is testCompactedBulkLoadedFiles/a:a/1731234740997/Put/seqid=0 2024-11-10T10:32:21,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741921_1099 (size=5107) 2024-11-10T10:32:21,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741921_1099 (size=5107) 2024-11-10T10:32:21,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741921_1099 (size=5107) 2024-11-10T10:32:21,079 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=4 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/.tmp/a/d0ae04ebf4ac4cba9e75264d43606dd9 2024-11-10T10:32:21,084 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/.tmp/a/d0ae04ebf4ac4cba9e75264d43606dd9 as 
hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/d0ae04ebf4ac4cba9e75264d43606dd9 2024-11-10T10:32:21,089 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/d0ae04ebf4ac4cba9e75264d43606dd9, entries=1, sequenceid=4, filesize=5.0 K 2024-11-10T10:32:21,090 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~51 B/51, heapSize ~368 B/368, currentSize=0 B/0 for 6a7880317438693e0b317caf967f3a0e in 38ms, sequenceid=4, compaction requested=false 2024-11-10T10:32:21,090 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 6a7880317438693e0b317caf967f3a0e: 2024-11-10T10:32:21,091 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile0 as hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_ 2024-11-10T10:32:21,092 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile1 as hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_ 2024-11-10T10:32:21,093 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile2 as hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/9207645448be4cdd865a7bbd6eb80c27_SeqId_4_ 2024-11-10T10:32:21,093 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile0 into 6a7880317438693e0b317caf967f3a0e/a as hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_ - updating store file list. 2024-11-10T10:32:21,097 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-10T10:32:21,098 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_ into 6a7880317438693e0b317caf967f3a0e/a 2024-11-10T10:32:21,098 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile0 into 6a7880317438693e0b317caf967f3a0e/a (new location: hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_) 2024-11-10T10:32:21,098 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile1 into 6a7880317438693e0b317caf967f3a0e/a as hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_ - updating store file list. 
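The "Validating hfile ... for inclusion" records above reduce to a range-containment check: a bulk-loaded file's first/last keys must fall inside the region's [startKey, endKey), with empty bounds meaning unbounded on that side, which is why "Region bounds: first= last=" accepts all three files. A toy sketch of that check, not HStore's actual code:

static boolean fitsRegion(byte[] firstKey, byte[] lastKey,
                          byte[] regionStartKey, byte[] regionEndKey) {
    // An empty region bound means "unbounded" on that side.
    boolean aboveStart = regionStartKey.length == 0
        || java.util.Arrays.compareUnsigned(firstKey, regionStartKey) >= 0;
    boolean belowEnd = regionEndKey.length == 0
        || java.util.Arrays.compareUnsigned(lastKey, regionEndKey) < 0;
    return aboveStart && belowEnd;
}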
2024-11-10T10:32:21,103 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-10T10:32:21,103 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_ into 6a7880317438693e0b317caf967f3a0e/a 2024-11-10T10:32:21,103 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile1 into 6a7880317438693e0b317caf967f3a0e/a (new location: hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_) 2024-11-10T10:32:21,104 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile2 into 6a7880317438693e0b317caf967f3a0e/a as hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/9207645448be4cdd865a7bbd6eb80c27_SeqId_4_ - updating store file list. 2024-11-10T10:32:21,107 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 9207645448be4cdd865a7bbd6eb80c27_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-10T10:32:21,108 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/9207645448be4cdd865a7bbd6eb80c27_SeqId_4_ into 6a7880317438693e0b317caf967f3a0e/a 2024-11-10T10:32:21,108 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:39939/hbase/testCompactedBulkLoadedFiles/hfile2 into 6a7880317438693e0b317caf967f3a0e/a (new location: hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/9207645448be4cdd865a7bbd6eb80c27_SeqId_4_) 2024-11-10T10:32:21,115 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-10T10:32:21,115 DEBUG [Time-limited test {}] regionserver.HStore(1541): 6a7880317438693e0b317caf967f3a0e/a is initiating major compaction (all files) 2024-11-10T10:32:21,115 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 6a7880317438693e0b317caf967f3a0e/a in testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e. 
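The selection above reports 4 store files, 0 compacting, 4 eligible against the configured minFilesToCompact:3, and the request becomes a major compaction over all files. A simplified sketch of that decision; the types and method here are illustrative, not SortedCompactionPolicy's API (in this test the major compaction is requested directly):

record StoreFile(String name, long sizeBytes) {}

static java.util.List<StoreFile> selectAllForMajor(java.util.List<StoreFile> eligible,
                                                   int compacting, int minFilesToCompact) {
    // Rewrite every file into a single output when enough files are eligible
    // and no other compaction holds them; otherwise select nothing.
    if (compacting == 0 && eligible.size() >= minFilesToCompact) {
        return java.util.List.copyOf(eligible);
    }
    return java.util.List.of();
}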
2024-11-10T10:32:21,115 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/d0ae04ebf4ac4cba9e75264d43606dd9, hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_, hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_, hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/9207645448be4cdd865a7bbd6eb80c27_SeqId_4_] into tmpdir=hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/.tmp, totalSize=19.3 K 2024-11-10T10:32:21,116 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d0ae04ebf4ac4cba9e75264d43606dd9, keycount=1, bloomtype=ROW, size=5.0 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=1731234740997 2024-11-10T10:32:21,116 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-10T10:32:21,117 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-10T10:32:21,117 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 9207645448be4cdd865a7bbd6eb80c27_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-10T10:32:21,129 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/.tmp/a/74b1ef4692c74b88be3a7ddc23d4f689 is 55, key is testCompactedBulkLoadedFiles/a:a/1731234740997/Put/seqid=0 2024-11-10T10:32:21,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741922_1100 (size=6154) 2024-11-10T10:32:21,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741922_1100 (size=6154) 2024-11-10T10:32:21,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741922_1100 (size=6154) 2024-11-10T10:32:21,150 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/.tmp/a/74b1ef4692c74b88be3a7ddc23d4f689 as hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/74b1ef4692c74b88be3a7ddc23d4f689 2024-11-10T10:32:21,159 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 4 (all) file(s) in 6a7880317438693e0b317caf967f3a0e/a of 6a7880317438693e0b317caf967f3a0e into 74b1ef4692c74b88be3a7ddc23d4f689(size=6.0 K), total size for store is 6.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
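The rewrite just logged (one flush output plus three bulk-loaded files, 19.3 K in, a single 6.0 K file out) is at heart a k-way merge of sorted inputs. A toy version over string keys; the real compactor merges KeyValue scanners and applies version and delete handling, which is omitted here:

static java.util.List<String> mergeSorted(java.util.List<java.util.List<String>> inputs) {
    // Heap entries are {inputIndex, offset}, ordered by the key they point at.
    java.util.Comparator<int[]> byKey =
        java.util.Comparator.comparing(e -> inputs.get(e[0]).get(e[1]));
    java.util.PriorityQueue<int[]> heap = new java.util.PriorityQueue<>(byKey);
    for (int i = 0; i < inputs.size(); i++) {
        if (!inputs.get(i).isEmpty()) heap.add(new int[] {i, 0});
    }
    java.util.List<String> merged = new java.util.ArrayList<>();
    while (!heap.isEmpty()) {
        int[] e = heap.poll();
        merged.add(inputs.get(e[0]).get(e[1]));   // emit the smallest current key
        if (e[1] + 1 < inputs.get(e[0]).size()) {
            heap.add(new int[] {e[0], e[1] + 1}); // advance that input
        }
    }
    return merged;
}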
2024-11-10T10:32:21,159 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 6a7880317438693e0b317caf967f3a0e: 2024-11-10T10:32:21,159 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-11-10T10:32:21,159 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-11-10T10:32:21,162 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError 2024-11-10T10:32:21,162 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError Metrics about Tables on a single HBase RegionServer 2024-11-10T10:32:21,163 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles 2024-11-10T10:32:21,163 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles Metrics about Tables on a single HBase RegionServer 2024-11-10T10:32:21,198 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957, size=0 (0bytes) 2024-11-10T10:32:21,198 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957 might be still open, length is 0 2024-11-10T10:32:21,198 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957 2024-11-10T10:32:21,199 WARN [IPC Server handler 2 on default port 39939 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957 has not been closed. Lease recovery is in progress. RecoveryId = 1101 for block blk_1073741917_1095 2024-11-10T10:32:21,199 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957 after 1ms 2024-11-10T10:32:22,682 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:56562 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:44251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56562 dst: /127.0.0.1:44251 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44251 remote=/127.0.0.1:56562]. Total timeout mills is 60000, 58477 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T10:32:22,682 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:42732 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:43217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42732 dst: /127.0.0.1:43217 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T10:32:22,682 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:42570 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741917_1095] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42570 dst: /127.0.0.1:44741 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T10:32:25,114 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-10T10:32:25,200 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957 after 4002ms 2024-11-10T10:32:25,203 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:32:25,203 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957 took 4005ms 2024-11-10T10:32:25,205 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957; continuing. 
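The RecoverLeaseFSUtils lines above (attempt=0 fails while "Lease recovery is in progress", attempt=1 succeeds ~4s later) follow the standard HDFS lease-recovery pattern built on the public DistributedFileSystem#recoverLease API. A sketch of that retry loop, with the poll interval an assumption matched to the log's ~4s gap (HBase's own backoff policy differs in detail):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem)
        FileSystem.get(URI.create("hdfs://localhost:39939"), conf);
    Path wal = new Path(
        "/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957");
    int attempt = 0;
    // recoverLease() returns true once the NameNode has closed the file; the
    // first call typically just *starts* recovery, which is why attempt=0 "fails".
    while (!dfs.recoverLease(wal)) {
      System.out.println("Failed to recover lease, attempt=" + attempt++);
      Thread.sleep(4000L); // assumed poll interval; tune per cluster
    }
    System.out.println("Recovered lease, attempt=" + attempt);
  }
}
```

Only after recovery succeeds can the splitter open the WAL, which is why the "Open ... took 4005ms" line above is dominated by the lease wait.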
2024-11-10T10:32:25,205 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957 so closing down 2024-11-10T10:32:25,205 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-10T10:32:25,206 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731234740957.temp 2024-11-10T10:32:25,207 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000003-wal.1731234740957.temp 2024-11-10T10:32:25,208 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-10T10:32:25,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741923_1102 (size=549) 2024-11-10T10:32:25,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741923_1102 (size=549) 2024-11-10T10:32:25,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741923_1102 (size=549) 2024-11-10T10:32:25,214 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000003-wal.1731234740957.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-11-10T10:32:25,215 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000003-wal.1731234740957.temp to hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000008 2024-11-10T10:32:25,215 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 5 edits across 1 Regions in 12 ms; skipped=3; WAL=hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957, size=0, length=0, corrupted=false, cancelled=false 2024-11-10T10:32:25,215 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957, journal: Splitting hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957, size=0 (0bytes) at 1731234741198Finishing writing output for hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957 so closing down at 1731234745205 (+4007 ms)Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000003-wal.1731234740957.temp at 1731234745207 (+2 ms)3 split writer threads finished at 1731234745208 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000003-wal.1731234740957.temp (wrote 2 edits, 
skipped 0 edits in 0 ms) at 1731234745214 (+6 ms)Rename recovered edits hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000003-wal.1731234740957.temp to hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000008 at 1731234745215 (+1 ms)Processed 5 edits across 1 Regions in 12 ms; skipped=3; WAL=hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957, size=0, length=0, corrupted=false, cancelled=false at 1731234745215 2024-11-10T10:32:25,217 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234740957 to hdfs://localhost:39939/hbase/oldWALs/wal.1731234740957 2024-11-10T10:32:25,218 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000008 2024-11-10T10:32:25,218 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-10T10:32:25,219 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:25,233 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234745220, exclude list is [], retry=0 2024-11-10T10:32:25,235 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:25,236 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:25,236 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:25,238 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731234740893/wal.1731234745220 2024-11-10T10:32:25,238 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983)] 2024-11-10T10:32:25,238 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 6a7880317438693e0b317caf967f3a0e, NAME => 'testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e.', STARTKEY => '', ENDKEY => ''} 2024-11-10T10:32:25,238 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:25,238 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:25,238 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:25,239 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:25,240 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a7880317438693e0b317caf967f3a0e columnFamilyName a 2024-11-10T10:32:25,240 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:25,246 DEBUG [StoreFileOpener-6a7880317438693e0b317caf967f3a0e-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-10T10:32:25,246 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_ 2024-11-10T10:32:25,249 DEBUG [StoreFileOpener-6a7880317438693e0b317caf967f3a0e-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-10T10:32:25,249 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_ 2024-11-10T10:32:25,253 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/74b1ef4692c74b88be3a7ddc23d4f689 2024-11-10T10:32:25,257 DEBUG [StoreFileOpener-6a7880317438693e0b317caf967f3a0e-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 9207645448be4cdd865a7bbd6eb80c27_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-10T10:32:25,257 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/9207645448be4cdd865a7bbd6eb80c27_SeqId_4_ 2024-11-10T10:32:25,260 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/d0ae04ebf4ac4cba9e75264d43606dd9 2024-11-10T10:32:25,261 WARN [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@7fc2b637 2024-11-10T10:32:25,261 WARN [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@7fc2b637 2024-11-10T10:32:25,261 WARN [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/9207645448be4cdd865a7bbd6eb80c27_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@7fc2b637 2024-11-10T10:32:25,261 WARN [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/d0ae04ebf4ac4cba9e75264d43606dd9 from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@7fc2b637 2024-11-10T10:32:25,261 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.StoreEngine(327): Moving the files [hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_, hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_, hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/9207645448be4cdd865a7bbd6eb80c27_SeqId_4_, hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/d0ae04ebf4ac4cba9e75264d43606dd9] to archive 2024-11-10T10:32:25,262 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
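On reopen, the store loads both the compaction output (74b1ef4692c74b88be3a7ddc23d4f689) and the files it superseded: the three bulk-loaded `_SeqId_4_` HFiles and the pre-compaction file. The "Clearing the compacted storefile" warnings mark the superseded files, and HFileArchiver then moves each one to an archive location that simply mirrors its data location, as the archive lines below show. An illustrative string-level sketch of that mapping (paths taken from the log):

```java
public class ArchivePathSketch {
  public static void main(String[] args) {
    String root = "hdfs://localhost:39939/hbase/";
    String relative = "data/default/testCompactedBulkLoadedFiles/"
        + "6a7880317438693e0b317caf967f3a0e/a/d0ae04ebf4ac4cba9e75264d43606dd9";
    String dataPath = root + relative;                   // live store file
    String archivePath = root + "archive/" + relative;   // where HFileArchiver moves it
    System.out.println(dataPath + " -> " + archivePath);
  }
}
```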
2024-11-10T10:32:25,263 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_ to hdfs://localhost:39939/hbase/archive/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_ 2024-11-10T10:32:25,264 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_ to hdfs://localhost:39939/hbase/archive/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_ 2024-11-10T10:32:25,265 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/9207645448be4cdd865a7bbd6eb80c27_SeqId_4_ to hdfs://localhost:39939/hbase/archive/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/9207645448be4cdd865a7bbd6eb80c27_SeqId_4_ 2024-11-10T10:32:25,266 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/d0ae04ebf4ac4cba9e75264d43606dd9 to hdfs://localhost:39939/hbase/archive/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/a/d0ae04ebf4ac4cba9e75264d43606dd9 2024-11-10T10:32:25,266 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(327): Store=6a7880317438693e0b317caf967f3a0e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:25,266 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:25,267 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a7880317438693e0b317caf967f3a0e columnFamilyName b 2024-11-10T10:32:25,267 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:25,267 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] 
regionserver.HStore(327): Store=6a7880317438693e0b317caf967f3a0e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:25,268 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:25,268 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6a7880317438693e0b317caf967f3a0e columnFamilyName c 2024-11-10T10:32:25,268 DEBUG [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:25,269 INFO [StoreOpener-6a7880317438693e0b317caf967f3a0e-1 {}] regionserver.HStore(327): Store=6a7880317438693e0b317caf967f3a0e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:25,269 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:25,269 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:25,271 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:25,271 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000008 2024-11-10T10:32:25,273 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000008: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-10T10:32:25,274 DEBUG [Time-limited test {}] regionserver.HRegion(5836): 6a7880317438693e0b317caf967f3a0e : Replaying compaction marker table_name: "testCompactedBulkLoadedFiles" encoded_region_name: "6a7880317438693e0b317caf967f3a0e" family_name: "a" compaction_input: "d0ae04ebf4ac4cba9e75264d43606dd9" compaction_input: "54bf30d4d6d542daa8bfc2c861a42426_SeqId_4_" compaction_input: "3858dd8dfb2d47a1b1b42e3a0e87692e_SeqId_4_" compaction_input: 
"9207645448be4cdd865a7bbd6eb80c27_SeqId_4_" compaction_output: "74b1ef4692c74b88be3a7ddc23d4f689" store_home_dir: "a" region_name: "testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e." with seqId=9223372036854775807 and lastReplayedOpenRegionSeqId=-1 2024-11-10T10:32:25,274 DEBUG [Time-limited test {}] regionserver.HStore(1354): Completing compaction from the WAL marker 2024-11-10T10:32:25,274 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 0, skipped 2, firstSequenceIdInLog=3, maxSequenceIdInLog=8, path=hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000008 2024-11-10T10:32:25,275 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/0000000000000000008 2024-11-10T10:32:25,276 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:25,276 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:25,276 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-10T10:32:25,278 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 6a7880317438693e0b317caf967f3a0e 2024-11-10T10:32:25,280 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testCompactedBulkLoadedFiles/6a7880317438693e0b317caf967f3a0e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-10T10:32:25,280 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 6a7880317438693e0b317caf967f3a0e; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70699177, jitterRate=0.05349983274936676}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-10T10:32:25,280 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 6a7880317438693e0b317caf967f3a0e: Writing region info on filesystem at 1731234745238Initializing all the Stores at 1731234745239 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234745239Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234745239Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 
B (64KB)'} at 1731234745239Cleaning up temporary data from old regions at 1731234745276 (+37 ms)Region opened successfully at 1731234745280 (+4 ms) 2024-11-10T10:32:25,283 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 6a7880317438693e0b317caf967f3a0e, disabling compactions & flushes 2024-11-10T10:32:25,283 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e. 2024-11-10T10:32:25,283 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e. 2024-11-10T10:32:25,283 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e. after waiting 0 ms 2024-11-10T10:32:25,283 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e. 2024-11-10T10:32:25,283 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1731234740894.6a7880317438693e0b317caf967f3a0e. 2024-11-10T10:32:25,283 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 6a7880317438693e0b317caf967f3a0e: Waiting for close lock at 1731234745283Disabling compacts and flushes for region at 1731234745283Disabling writes for close at 1731234745283Writing region close event to WAL at 1731234745283Closed at 1731234745283 2024-11-10T10:32:25,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741924_1103 (size=95) 2024-11-10T10:32:25,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741924_1103 (size=95) 2024-11-10T10:32:25,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741924_1103 (size=95) 2024-11-10T10:32:25,288 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-10T10:32:25,288 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731234745220) 2024-11-10T10:32:25,304 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=441 (was 427) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:39939 from jenkinstestCompactedBulkLoadedFiles java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1739173161_22 at /127.0.0.1:42786 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestCompactedBulkLoadedFiles@localhost:39939 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1739173161_22 at /127.0.0.1:42648 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1739173161_22 at 
/127.0.0.1:56642 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1346 (was 1264) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=182 (was 190), ProcessCount=11 (was 11), AvailableMemoryMB=8671 (was 8707) 2024-11-10T10:32:25,304 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1346 is superior to 1024 2024-11-10T10:32:25,318 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=441, OpenFileDescriptor=1346, MaxFileDescriptor=1048576, SystemLoadAverage=182, ProcessCount=11, AvailableMemoryMB=8671 2024-11-10T10:32:25,318 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1346 is superior to 1024 2024-11-10T10:32:25,333 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:32:25,335 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-10T10:32:25,335 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-10T10:32:25,337 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-41857183, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/hregion-41857183, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:25,350 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/hregion-41857183/hregion-41857183.1731234745337, exclude list is [], retry=0 2024-11-10T10:32:25,353 DEBUG [AsyncFSWAL-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:25,353 DEBUG [AsyncFSWAL-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:25,353 DEBUG [AsyncFSWAL-28-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:25,355 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-41857183/hregion-41857183.1731234745337 2024-11-10T10:32:25,355 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:41983:41983)] 2024-11-10T10:32:25,355 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 7febc022c08d82520e304375dc150a80, NAME => 'testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39939/hbase 2024-11-10T10:32:25,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741926_1105 (size=67) 2024-11-10T10:32:25,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741926_1105 (size=67) 2024-11-10T10:32:25,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741926_1105 (size=67) 2024-11-10T10:32:25,366 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-10T10:32:25,367 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7febc022c08d82520e304375dc150a80 2024-11-10T10:32:25,369 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName a 2024-11-10T10:32:25,369 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:25,369 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:25,369 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7febc022c08d82520e304375dc150a80 2024-11-10T10:32:25,370 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName b 2024-11-10T10:32:25,370 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:25,371 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:25,371 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7febc022c08d82520e304375dc150a80 2024-11-10T10:32:25,372 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName c 2024-11-10T10:32:25,372 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-10T10:32:25,372 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-10T10:32:25,372 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7febc022c08d82520e304375dc150a80 2024-11-10T10:32:25,373 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80 2024-11-10T10:32:25,373 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80 2024-11-10T10:32:25,374 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7febc022c08d82520e304375dc150a80 2024-11-10T10:32:25,374 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7febc022c08d82520e304375dc150a80 2024-11-10T10:32:25,374 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-10T10:32:25,375 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7febc022c08d82520e304375dc150a80 2024-11-10T10:32:25,377 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-10T10:32:25,377 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7febc022c08d82520e304375dc150a80; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67065888, jitterRate=-6.403923034667969E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-10T10:32:25,378 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7febc022c08d82520e304375dc150a80: Writing region info on filesystem at 1731234745367Initializing all the Stores at 1731234745367Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234745367Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234745367Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234745367Cleaning up temporary data from old regions at 1731234745374 (+7 ms)Region opened successfully at 1731234745378 (+4 ms) 2024-11-10T10:32:25,378 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7febc022c08d82520e304375dc150a80, disabling compactions & flushes 2024-11-10T10:32:25,378 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. 2024-11-10T10:32:25,378 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. 2024-11-10T10:32:25,378 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. 
after waiting 0 ms 2024-11-10T10:32:25,378 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. 2024-11-10T10:32:25,378 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. 2024-11-10T10:32:25,378 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7febc022c08d82520e304375dc150a80: Waiting for close lock at 1731234745378Disabling compacts and flushes for region at 1731234745378Disabling writes for close at 1731234745378Writing region close event to WAL at 1731234745378Closed at 1731234745378 2024-11-10T10:32:25,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741925_1104 (size=95) 2024-11-10T10:32:25,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741925_1104 (size=95) 2024-11-10T10:32:25,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741925_1104 (size=95) 2024-11-10T10:32:25,383 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-10T10:32:25,383 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-41857183:(num 1731234745337) 2024-11-10T10:32:25,383 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-10T10:32:25,384 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32 2024-11-10T10:32:25,398 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385, exclude list is [], retry=0 2024-11-10T10:32:25,400 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK] 2024-11-10T10:32:25,400 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK] 2024-11-10T10:32:25,400 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK] 2024-11-10T10:32:25,401 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385 2024-11-10T10:32:25,402 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983),(127.0.0.1/127.0.0.1:38697:38697)] 2024-11-10T10:32:25,402 DEBUG [Time-limited test {}] regionserver.HRegion(7752): 
Opening region: {ENCODED => 7febc022c08d82520e304375dc150a80, NAME => 'testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.', STARTKEY => '', ENDKEY => ''}
2024-11-10T10:32:25,402 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:32:25,402 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,402 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,404 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,404 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName a
2024-11-10T10:32:25,404 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:25,405 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:25,405 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,406 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName b
2024-11-10T10:32:25,406 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:25,406 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:25,406 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,407 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName c
2024-11-10T10:32:25,407 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:25,407 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:25,407 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,408 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,409 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,410 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,410 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,410 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead.
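[Editor's note] The FlushLargeStoresPolicy line above is the per-family flush threshold fallback: with no explicit lower bound configured, the policy divides the region's memstore flush size by the number of column families. A minimal sketch of that arithmetic, assuming the 128 MB flush size implied by the logged result (the next "Opened" line reports flushSizeLowerBound=44739242, i.e. ~42.7 MB):

```java
// Sketch, not HBase source: reproduces the fallback lower bound in the log line above.
// The 128 MB memstore flush size is an assumption read off the logged result.
public class FlushLowerBoundSketch {
    static final String LOWER_BOUND_KEY = "hbase.hregion.percolumnfamilyflush.size.lower.bound";

    public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // region memstore flush size (assumed 128 MB here)
        int familyCount = 3;                         // column families a, b, c
        long lowerBound = memstoreFlushSize / familyCount;
        System.out.println(LOWER_BOUND_KEY + " fallback = " + lowerBound); // 44739242 bytes ~= 42.7 MB
    }
}
```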
2024-11-10T10:32:25,411 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,412 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7febc022c08d82520e304375dc150a80; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65934740, jitterRate=-0.017495810985565186}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242}
2024-11-10T10:32:25,412 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7febc022c08d82520e304375dc150a80: Writing region info on filesystem at 1731234745402Initializing all the Stores at 1731234745403 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234745403Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234745403Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234745403Cleaning up temporary data from old regions at 1731234745410 (+7 ms)Region opened successfully at 1731234745412 (+2 ms)
2024-11-10T10:32:25,420 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7febc022c08d82520e304375dc150a80 3/3 column families, dataSize=870 B heapSize=2.31 KB
2024-11-10T10:32:25,436 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/a/fe12b2e2816f467bb9db169546ce5351 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1731234745412/Put/seqid=0
2024-11-10T10:32:25,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741928_1107 (size=5958)
2024-11-10T10:32:25,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741928_1107 (size=5958)
2024-11-10T10:32:25,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741928_1107 (size=5958)
2024-11-10T10:32:25,444 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/a/fe12b2e2816f467bb9db169546ce5351
2024-11-10T10:32:25,449 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/a/fe12b2e2816f467bb9db169546ce5351 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/a/fe12b2e2816f467bb9db169546ce5351
2024-11-10T10:32:25,454 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/a/fe12b2e2816f467bb9db169546ce5351, entries=10, sequenceid=13, filesize=5.8 K
2024-11-10T10:32:25,455 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 7febc022c08d82520e304375dc150a80 in 35ms, sequenceid=13, compaction requested=false
2024-11-10T10:32:25,455 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 7febc022c08d82520e304375dc150a80:
2024-11-10T10:32:25,472 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7febc022c08d82520e304375dc150a80, disabling compactions & flushes
2024-11-10T10:32:25,472 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.
2024-11-10T10:32:25,472 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.
2024-11-10T10:32:25,472 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. after waiting 0 ms
2024-11-10T10:32:25,472 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.
2024-11-10T10:32:25,473 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1740 in region testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.
2024-11-10T10:32:25,473 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.
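[Editor's note] The "Committing .tmp/a/<file> as a/<file>" pair above shows the flush's two-phase file commit: the HFile is written into the region's .tmp directory and then renamed into the live column-family directory. A minimal sketch of that pattern using the Hadoop FileSystem API (paths are illustrative, not HBase's implementation):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFileSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // write side: flusher produces the HFile under .tmp first
        Path tmp = new Path("/hbase/data/default/t/region/.tmp/a/fe12b2e2816f467bb9db169546ce5351");
        Path dst = new Path("/hbase/data/default/t/region/a/fe12b2e2816f467bb9db169546ce5351");
        // rename is atomic within an HDFS namespace, so readers see either
        // no store file at all or a complete one, never a half-written HFile
        if (!fs.rename(tmp, dst)) {
            throw new java.io.IOException("commit failed for " + tmp);
        }
    }
}
```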
2024-11-10T10:32:25,473 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7febc022c08d82520e304375dc150a80: Waiting for close lock at 1731234745472Disabling compacts and flushes for region at 1731234745472Disabling writes for close at 1731234745472Writing region close event to WAL at 1731234745473 (+1 ms)Closed at 1731234745473
2024-11-10T10:32:25,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741927_1106 (size=3346)
2024-11-10T10:32:25,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741927_1106 (size=3346)
2024-11-10T10:32:25,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741927_1106 (size=3346)
2024-11-10T10:32:25,493 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385, size=3.3 K (3346bytes)
2024-11-10T10:32:25,493 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385
2024-11-10T10:32:25,494 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385 after 0ms
2024-11-10T10:32:25,495 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:32:25,496 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385 took 4ms
2024-11-10T10:32:25,497 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385 so closing down
2024-11-10T10:32:25,497 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish
2024-11-10T10:32:25,498 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731234745385.temp
2024-11-10T10:32:25,499 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000003-wal.1731234745385.temp
2024-11-10T10:32:25,500 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished
2024-11-10T10:32:25,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741929_1108 (size=2944)
2024-11-10T10:32:25,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741929_1108 (size=2944)
2024-11-10T10:32:25,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741929_1108 (size=2944)
2024-11-10T10:32:25,507 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000003-wal.1731234745385.temp (wrote 30 edits, skipped 0 edits in 0 ms)
2024-11-10T10:32:25,508 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000003-wal.1731234745385.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000035
2024-11-10T10:32:25,508 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 12 ms; skipped=2; WAL=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385, size=3.3 K, length=3346, corrupted=false, cancelled=false
2024-11-10T10:32:25,508 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385, journal: Splitting hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385, size=3.3 K (3346bytes) at 1731234745493Finishing writing output for hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385 so closing down at 1731234745497 (+4 ms)Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000003-wal.1731234745385.temp at 1731234745499 (+2 ms)3 split writer threads finished at 1731234745500 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000003-wal.1731234745385.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1731234745507 (+7 ms)Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000003-wal.1731234745385.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000035 at 1731234745508 (+1 ms)Processed 32 edits across 1 Regions in 12 ms; skipped=2; WAL=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385, size=3.3 K, length=3346, corrupted=false, cancelled=false at 1731234745508
2024-11-10T10:32:25,510 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745385 to hdfs://localhost:39939/hbase/oldWALs/wal.1731234745385
2024-11-10T10:32:25,511 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000035
2024-11-10T10:32:25,511 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor
2024-11-10T10:32:25,512 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
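[Editor's note] The split writer above creates "0000000000000000003-wal.1731234745385.temp" and renames it to "0000000000000000035" once the WAL is fully drained. A sketch of that naming convention, under the assumption that the 19-digit numbers are zero-padded sequence ids (the temp name carries the first sequence id, the final name the highest edit the file contains):

```java
// Sketch of the recovered-edits file naming visible in the log lines above.
public class RecoveredEditsNameSketch {
    static String tempName(long firstSeqId, String walName) {
        return String.format("%019d-%s.temp", firstSeqId, walName);
    }
    static String finalName(long maxSeqId) {
        return String.format("%019d", maxSeqId);
    }
    public static void main(String[] args) {
        System.out.println(tempName(3, "wal.1731234745385")); // 0000000000000000003-wal.1731234745385.temp
        System.out.println(finalName(35));                    // 0000000000000000035
    }
}
```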
2024-11-10T10:32:25,526 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513, exclude list is [], retry=0
2024-11-10T10:32:25,529 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:32:25,529 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:32:25,529 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:32:25,531 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513
2024-11-10T10:32:25,531 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:41983:41983)]
2024-11-10T10:32:25,531 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 7febc022c08d82520e304375dc150a80, NAME => 'testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.', STARTKEY => '', ENDKEY => ''}
2024-11-10T10:32:25,532 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-10T10:32:25,532 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,532 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,534 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,536 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName a
2024-11-10T10:32:25,536 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:25,542 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/a/fe12b2e2816f467bb9db169546ce5351
2024-11-10T10:32:25,542 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:25,542 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,543 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName b
2024-11-10T10:32:25,543 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:25,543 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:25,543 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,544 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName c
2024-11-10T10:32:25,544 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:25,544 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:25,544 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,545 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,546 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,547 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000035
2024-11-10T10:32:25,548 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000035: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:32:25,549 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 10, firstSequenceIdInLog=3, maxSequenceIdInLog=35, path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000035
2024-11-10T10:32:25,549 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7febc022c08d82520e304375dc150a80 3/3 column families, dataSize=1.70 KB heapSize=3.88 KB
2024-11-10T10:32:25,564 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/b/737c57f0dec54461b5b0c10ef902c58c is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1731234745455/Put/seqid=0
2024-11-10T10:32:25,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741931_1110 (size=5958)
2024-11-10T10:32:25,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741931_1110 (size=5958)
2024-11-10T10:32:25,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741931_1110 (size=5958)
2024-11-10T10:32:25,571 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/b/737c57f0dec54461b5b0c10ef902c58c
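[Editor's note] The "Applied 20, skipped 10" line above shows the replay filter at work: family a was flushed at sequenceid=13 before the region closed, so its ten edits in the recovered-edits file are already durable and get skipped, while the twenty unflushed edits for b and c are re-applied. A sketch of that skip rule (the per-store flushed sequence ids are read off the log; the method itself is illustrative, not HBase's):

```java
import java.util.Map;

// Sketch of the replay decision implied by "Applied 20, skipped 10":
// an edit is re-applied only if its sequence id is above the store's
// highest already-flushed sequence id.
public class ReplayFilterSketch {
    static boolean shouldReplay(String family, long editSeqId, Map<String, Long> maxFlushedSeqIdPerStore) {
        long flushed = maxFlushedSeqIdPerStore.getOrDefault(family, -1L);
        return editSeqId > flushed; // older edits already live in an HFile
    }
    public static void main(String[] args) {
        Map<String, Long> flushed = Map.of("a", 13L, "b", -1L, "c", -1L);
        System.out.println(shouldReplay("a", 5, flushed));  // false -> skipped
        System.out.println(shouldReplay("b", 5, flushed));  // true  -> applied
    }
}
```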
2024-11-10T10:32:25,590 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/c/451fc8fa2909450e81821f41bfe9bde3 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1731234745462/Put/seqid=0
2024-11-10T10:32:25,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741932_1111 (size=5958)
2024-11-10T10:32:25,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741932_1111 (size=5958)
2024-11-10T10:32:25,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741932_1111 (size=5958)
2024-11-10T10:32:25,597 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/c/451fc8fa2909450e81821f41bfe9bde3
2024-11-10T10:32:25,603 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/b/737c57f0dec54461b5b0c10ef902c58c as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/b/737c57f0dec54461b5b0c10ef902c58c
2024-11-10T10:32:25,608 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/b/737c57f0dec54461b5b0c10ef902c58c, entries=10, sequenceid=35, filesize=5.8 K
2024-11-10T10:32:25,609 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/c/451fc8fa2909450e81821f41bfe9bde3 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/c/451fc8fa2909450e81821f41bfe9bde3
2024-11-10T10:32:25,614 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/c/451fc8fa2909450e81821f41bfe9bde3, entries=10, sequenceid=35, filesize=5.8 K
2024-11-10T10:32:25,614 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 7febc022c08d82520e304375dc150a80 in 65ms, sequenceid=35, compaction requested=false; wal=null
2024-11-10T10:32:25,615 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000035
2024-11-10T10:32:25,616 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,616 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,616 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead.
2024-11-10T10:32:25,617 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:25,619 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=1
2024-11-10T10:32:25,620 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7febc022c08d82520e304375dc150a80; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70835258, jitterRate=0.05552759766578674}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242}
2024-11-10T10:32:25,620 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7febc022c08d82520e304375dc150a80: Writing region info on filesystem at 1731234745532Initializing all the Stores at 1731234745533 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234745533Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234745534 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234745534Obtaining lock to block concurrent updates at 1731234745549 (+15 ms)Preparing flush snapshotting stores in 7febc022c08d82520e304375dc150a80 at 1731234745549Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80., syncing WAL and waiting on mvcc, flushsize=dataSize=1740, getHeapSize=3920, getOffHeapSize=0, getCellsCount=20 at 1731234745549Flushing stores of testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. at 1731234745549Flushing 7febc022c08d82520e304375dc150a80/b: creating writer at 1731234745550 (+1 ms)Flushing 7febc022c08d82520e304375dc150a80/b: appending metadata at 1731234745564 (+14 ms)Flushing 7febc022c08d82520e304375dc150a80/b: closing flushed file at 1731234745564Flushing 7febc022c08d82520e304375dc150a80/c: creating writer at 1731234745576 (+12 ms)Flushing 7febc022c08d82520e304375dc150a80/c: appending metadata at 1731234745590 (+14 ms)Flushing 7febc022c08d82520e304375dc150a80/c: closing flushed file at 1731234745590Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73d1ad57: reopening flushed file at 1731234745602 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@193658c9: reopening flushed file at 1731234745609 (+7 ms)Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 7febc022c08d82520e304375dc150a80 in 65ms, sequenceid=35, compaction requested=false; wal=null at 1731234745614 (+5 ms)Cleaning up temporary data from old regions at 1731234745616 (+2 ms)Region opened successfully at 1731234745620 (+4 ms)
2024-11-10T10:32:25,685 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513, size=0 (0bytes)
2024-11-10T10:32:25,685 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513 might be still open, length is 0
2024-11-10T10:32:25,685 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513
2024-11-10T10:32:25,685 WARN [IPC Server handler 3 on default port 39939 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513 has not been closed. Lease recovery is in progress. RecoveryId = 1112 for block blk_1073741930_1109
2024-11-10T10:32:25,685 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513 after 0ms
2024-11-10T10:32:28,679 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:42696 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:44741:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42696 dst: /127.0.0.1:44741
java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:44741 remote=/127.0.0.1:42696]. Total timeout mills is 60000, 56968 millis timeout left.
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:32:28,680 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:56684 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:44251:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56684 dst: /127.0.0.1:44251
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:32:28,680 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:42862 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741930_1109] {}] datanode.DataXceiver(331): 127.0.0.1:43217:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42862 dst: /127.0.0.1:43217
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:32:28,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741930_1112 (size=2936)
2024-11-10T10:32:28,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741930_1112 (size=2936)
2024-11-10T10:32:29,686 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513 after 4001ms
2024-11-10T10:32:29,689 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:32:29,689 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513 took 4005ms
2024-11-10T10:32:29,691 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513; continuing.
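[Editor's note] The sequence above (attempt=0 fails while the dead writer still holds the HDFS lease, attempt=1 succeeds ~4 s later once the pipeline is torn down) is lease recovery before WAL splitting. A sketch of the retry around the underlying HDFS call; this is not HBase's RecoverLeaseFSUtils, and the fixed 4 s pause is an assumption read off the timestamps, not HBase's real backoff:

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverLeaseSketch {
    static void recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
        for (int attempt = 0; ; attempt++) {
            // recoverLease returns true once the file is closed and safe to read for splitting
            if (dfs.recoverLease(wal)) {
                System.out.println("Recovered lease, attempt=" + attempt + " on file=" + wal);
                return;
            }
            System.out.println("Failed to recover lease, attempt=" + attempt + " on file=" + wal);
            Thread.sleep(4000L); // assumed pause between attempts
        }
    }
}
```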
2024-11-10T10:32:29,691 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513 so closing down
2024-11-10T10:32:29,691 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish
2024-11-10T10:32:29,693 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000037-wal.1731234745513.temp
2024-11-10T10:32:29,694 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000037-wal.1731234745513.temp
2024-11-10T10:32:29,695 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished
2024-11-10T10:32:29,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741933_1113 (size=2944)
2024-11-10T10:32:29,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741933_1113 (size=2944)
2024-11-10T10:32:29,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741933_1113 (size=2944)
2024-11-10T10:32:29,701 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000037-wal.1731234745513.temp (wrote 30 edits, skipped 0 edits in 0 ms)
2024-11-10T10:32:29,702 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000037-wal.1731234745513.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000066
2024-11-10T10:32:29,703 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 30 edits across 1 Regions in 12 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513, size=0, length=0, corrupted=false, cancelled=false
2024-11-10T10:32:29,703 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513, journal: Splitting hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513, size=0 (0bytes) at 1731234745685Finishing writing output for hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513 so closing down at 1731234749691 (+4006 ms)Creating recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000037-wal.1731234745513.temp at 1731234749694 (+3 ms)3 split writer threads finished at 1731234749695 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000037-wal.1731234745513.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1731234749701 (+6 ms)Rename recovered edits hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000037-wal.1731234745513.temp to hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000066 at 1731234749702 (+1 ms)Processed 30 edits across 1 Regions in 12 ms; skipped=0; WAL=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513, size=0, length=0, corrupted=false, cancelled=false at 1731234749703 (+1 ms)
2024-11-10T10:32:29,704 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234745513 to hdfs://localhost:39939/hbase/oldWALs/wal.1731234745513
2024-11-10T10:32:29,705 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000066
2024-11-10T10:32:29,705 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor
2024-11-10T10:32:29,707 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332, archiveDir=hdfs://localhost:39939/hbase/oldWALs, maxLogs=32
2024-11-10T10:32:29,721 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234749707, exclude list is [], retry=0
2024-11-10T10:32:29,723 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43217,DS-98f5c2d0-bdce-48dd-90e8-4b87e17d230d,DISK]
2024-11-10T10:32:29,724 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44741,DS-9e7b8df4-1272-4095-b85c-0dfc0bf327fa,DISK]
2024-11-10T10:32:29,724 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44251,DS-318b6bed-b662-4120-839e-ab74179655d9,DISK]
2024-11-10T10:32:29,725 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332/wal.1731234749707
2024-11-10T10:32:29,725 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38697:38697),(127.0.0.1/127.0.0.1:37657:37657),(127.0.0.1/127.0.0.1:41983:41983)]
2024-11-10T10:32:29,726 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
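[Editor's note] The "New WAL .../wal.1731234749707" and "Moved ... to .../oldWALs/..." lines above show the WAL naming and archival scheme. A sketch of it, assuming the suffix after "wal." is the creation timestamp in epoch milliseconds (the values line up with the surrounding log timestamps, e.g. 1731234749707 vs. 10:32:29,707): a new writer gets logDir/<prefix>.<ts>, and a fully split WAL keeps its name but moves to archiveDir:

```java
import org.apache.hadoop.fs.Path;

public class WalNamingSketch {
    public static void main(String[] args) {
        String prefix = "wal";
        long ts = 1731234749707L; // matches the "New WAL" line at 10:32:29,707
        Path logDir = new Path("hdfs://localhost:39939/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731234745332");
        Path archiveDir = new Path("hdfs://localhost:39939/hbase/oldWALs");
        Path active = new Path(logDir, prefix + "." + ts);
        Path archived = new Path(archiveDir, active.getName()); // same file name, archive directory
        System.out.println(active);   // .../WALs/.../wal.1731234749707
        System.out.println(archived); // .../oldWALs/wal.1731234749707
    }
}
```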
2024-11-10T10:32:29,727 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:29,728 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName a
2024-11-10T10:32:29,728 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:29,733 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/a/fe12b2e2816f467bb9db169546ce5351
2024-11-10T10:32:29,733 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:29,733 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:29,734 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName b
2024-11-10T10:32:29,734 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:29,739 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/b/737c57f0dec54461b5b0c10ef902c58c
2024-11-10T10:32:29,739 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:29,739 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:29,740 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7febc022c08d82520e304375dc150a80 columnFamilyName c
2024-11-10T10:32:29,740 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-10T10:32:29,745 DEBUG [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/c/451fc8fa2909450e81821f41bfe9bde3
2024-11-10T10:32:29,745 INFO [StoreOpener-7febc022c08d82520e304375dc150a80-1 {}] regionserver.HStore(327): Store=7febc022c08d82520e304375dc150a80/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-11-10T10:32:29,745 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:29,746 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80
2024-11-10T10:32:29,747 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80
2024-11-10T10:32:29,748 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000066
2024-11-10T10:32:29,749 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000066: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ
2024-11-10T10:32:29,782 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 30, skipped 0, firstSequenceIdInLog=37, maxSequenceIdInLog=66, path=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000066
2024-11-10T10:32:29,782 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 7febc022c08d82520e304375dc150a80 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB
2024-11-10T10:32:29,798 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/a/fc8f95c7f26041ac9b55ed9b9ff2620a is 91, key is testReplayEditsWrittenViaHRegion/a:y0/1731234745626/Put/seqid=0
2024-11-10T10:32:29,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741935_1115 (size=5958)
2024-11-10T10:32:29,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741935_1115 (size=5958)
2024-11-10T10:32:29,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741935_1115 (size=5958)
2024-11-10T10:32:29,805 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/a/fc8f95c7f26041ac9b55ed9b9ff2620a
2024-11-10T10:32:29,830 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/b/9b97f9d52137494b9b07e5840c1ff413 is 91, key is testReplayEditsWrittenViaHRegion/b:y0/1731234745633/Put/seqid=0
2024-11-10T10:32:29,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741936_1116 (size=5958)
2024-11-10T10:32:29,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741936_1116 (size=5958)
2024-11-10T10:32:29,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741936_1116 (size=5958)
2024-11-10T10:32:29,837 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/b/9b97f9d52137494b9b07e5840c1ff413
2024-11-10T10:32:29,857 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/c/4f8209be66e045a1ad7f08e9970cc734 is 91, key is testReplayEditsWrittenViaHRegion/c:y0/1731234745640/Put/seqid=0
2024-11-10T10:32:29,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741937_1117 (size=5958)
2024-11-10T10:32:29,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741937_1117 (size=5958)
2024-11-10T10:32:29,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741937_1117 (size=5958)
2024-11-10T10:32:29,864 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/c/4f8209be66e045a1ad7f08e9970cc734
2024-11-10T10:32:29,869 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/a/fc8f95c7f26041ac9b55ed9b9ff2620a as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/a/fc8f95c7f26041ac9b55ed9b9ff2620a
2024-11-10T10:32:29,873 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/a/fc8f95c7f26041ac9b55ed9b9ff2620a, entries=10, sequenceid=66, filesize=5.8 K
2024-11-10T10:32:29,874 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/b/9b97f9d52137494b9b07e5840c1ff413 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/b/9b97f9d52137494b9b07e5840c1ff413
2024-11-10T10:32:29,878 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/b/9b97f9d52137494b9b07e5840c1ff413, entries=10, sequenceid=66, filesize=5.8 K
2024-11-10T10:32:29,879 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/.tmp/c/4f8209be66e045a1ad7f08e9970cc734 as hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/c/4f8209be66e045a1ad7f08e9970cc734
2024-11-10T10:32:29,883 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/c/4f8209be66e045a1ad7f08e9970cc734, entries=10, sequenceid=66, filesize=5.8 K
2024-11-10T10:32:29,883 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 7febc022c08d82520e304375dc150a80 in 101ms, sequenceid=66, compaction requested=false; wal=null
2024-11-10T10:32:29,884 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/0000000000000000066
2024-11-10T10:32:29,885 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:29,885 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:29,886 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead.
2024-11-10T10:32:29,887 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 7febc022c08d82520e304375dc150a80
2024-11-10T10:32:29,889 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/hbase/data/default/testReplayEditsWrittenViaHRegion/7febc022c08d82520e304375dc150a80/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=35
2024-11-10T10:32:29,890 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 7febc022c08d82520e304375dc150a80; next sequenceid=67; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74719848, jitterRate=0.11341249942779541}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242}
2024-11-10T10:32:29,890 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 7febc022c08d82520e304375dc150a80: Writing region info on filesystem at 1731234749726Initializing all the Stores at 1731234749727 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234749727Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234749727Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731234749727Obtaining lock to block concurrent updates at 1731234749782 (+55 ms)Preparing flush snapshotting stores in 7febc022c08d82520e304375dc150a80 at 1731234749782Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80., syncing WAL and waiting on mvcc, flushsize=dataSize=2610, getHeapSize=5520, getOffHeapSize=0, getCellsCount=30 at 1731234749782Flushing stores of testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80.
at 1731234749782Flushing 7febc022c08d82520e304375dc150a80/a: creating writer at 1731234749782Flushing 7febc022c08d82520e304375dc150a80/a: appending metadata at 1731234749797 (+15 ms)Flushing 7febc022c08d82520e304375dc150a80/a: closing flushed file at 1731234749797Flushing 7febc022c08d82520e304375dc150a80/b: creating writer at 1731234749809 (+12 ms)Flushing 7febc022c08d82520e304375dc150a80/b: appending metadata at 1731234749829 (+20 ms)Flushing 7febc022c08d82520e304375dc150a80/b: closing flushed file at 1731234749829Flushing 7febc022c08d82520e304375dc150a80/c: creating writer at 1731234749843 (+14 ms)Flushing 7febc022c08d82520e304375dc150a80/c: appending metadata at 1731234749856 (+13 ms)Flushing 7febc022c08d82520e304375dc150a80/c: closing flushed file at 1731234749856Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4747433d: reopening flushed file at 1731234749868 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@12bc849b: reopening flushed file at 1731234749873 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3869ec89: reopening flushed file at 1731234749878 (+5 ms)Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 7febc022c08d82520e304375dc150a80 in 101ms, sequenceid=66, compaction requested=false; wal=null at 1731234749883 (+5 ms)Cleaning up temporary data from old regions at 1731234749885 (+2 ms)Region opened successfully at 1731234749890 (+5 ms) 2024-11-10T10:32:29,902 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 7febc022c08d82520e304375dc150a80, disabling compactions & flushes 2024-11-10T10:32:29,902 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. 2024-11-10T10:32:29,902 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. 2024-11-10T10:32:29,902 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. after waiting 0 ms 2024-11-10T10:32:29,902 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. 2024-11-10T10:32:29,904 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731234745333.7febc022c08d82520e304375dc150a80. 
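The replay pass above follows a simple sequence-id rule: edits at or below the maximum sequence id already persisted in the store files (maxSeqId=35 here) are skipped, and everything newer is re-applied to the memstore, which is why edits 37..66 yield "Applied 30, skipped 0". The FlushLargeStoresPolicy fallback in the same span is plain division: the 128 MB memstore flush size over 3 column families gives 44739242 bytes, the "42.7 M" in the log. Below is a minimal sketch of that skip-or-apply decision, assuming a hypothetical Edit type; it is illustrative only, not the HRegion code.

// Hedged sketch of the seqid filter applied while replaying recovered.edits.
// Edit and its accessor are hypothetical stand-ins, not HBase types.
import java.util.List;

final class RecoveredEditsReplaySketch {
    /** A stand-in for a WAL edit; only the sequence id matters here. */
    record Edit(long seqId) {}

    /**
     * Replays edits whose sequence id is newer than what the region already
     * persisted. Mirrors the numbers in the log: with maxSeqId=35 and edits
     * 37..66, all 30 are applied and 0 are skipped.
     */
    static long replay(List<Edit> edits, long maxSeqId) {
        long maxSequenceIdInLog = -1;
        int applied = 0, skipped = 0;
        for (Edit e : edits) {
            if (e.seqId() <= maxSeqId) {
                skipped++;            // already reflected in a flushed store file
                continue;
            }
            applied++;                // would be written into the memstore here
            maxSequenceIdInLog = Math.max(maxSequenceIdInLog, e.seqId());
        }
        System.out.printf("Applied %d, skipped %d, maxSequenceIdInLog=%d%n",
            applied, skipped, maxSequenceIdInLog);
        return maxSequenceIdInLog;    // region then flushes and deletes the file
    }
}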
2024-11-10T10:32:29,904 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 7febc022c08d82520e304375dc150a80: Waiting for close lock at 1731234749902Disabling compacts and flushes for region at 1731234749902Disabling writes for close at 1731234749902Writing region close event to WAL at 1731234749904 (+2 ms)Closed at 1731234749904 2024-11-10T10:32:29,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741934_1114 (size=95) 2024-11-10T10:32:29,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741934_1114 (size=95) 2024-11-10T10:32:29,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741934_1114 (size=95) 2024-11-10T10:32:29,909 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-10T10:32:29,909 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731234749707) 2024-11-10T10:32:29,926 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=445 (was 441) Potentially hanging thread: LeaseRenewer:jenkinstestReplayEditsWrittenViaHRegion@localhost:39939 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:39939 from jenkinstestReplayEditsWrittenViaHRegion java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-28-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_782239754_22 at /127.0.0.1:42914 [Waiting for operation #23] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_782239754_22 at /127.0.0.1:56726 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_782239754_22 at /127.0.0.1:42754 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1412 (was 1346) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=168 (was 182), ProcessCount=11 (was 11), AvailableMemoryMB=8641 (was 8671) 2024-11-10T10:32:29,927 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1412 is superior to 1024 2024-11-10T10:32:29,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-10T10:32:29,927 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
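The ResourceChecker summary above diffs a snapshot taken before the test against one taken after (threads, open file descriptors, system load, memory) and warns once a threshold is crossed, here OpenFileDescriptor=1412 against the 1024 limit. A rough sketch of that accounting under standard JMX beans follows; the real checker in HBase's test support tracks more dimensions than this.

// Hedged sketch of before/after resource accounting, illustrative only.
import java.lang.management.ManagementFactory;

final class ResourceSnapshotSketch {
    record Snapshot(int threads, long openFds) {}

    static Snapshot take() {
        int threads = Thread.activeCount();   // rough live-thread estimate
        long fds = -1;                        // open FDs, available on Unix JVMs
        var os = ManagementFactory.getOperatingSystemMXBean();
        if (os instanceof com.sun.management.UnixOperatingSystemMXBean unix) {
            fds = unix.getOpenFileDescriptorCount();
        }
        return new Snapshot(threads, fds);
    }

    static void report(Snapshot before, Snapshot after, long fdWarnLimit) {
        System.out.printf("Thread=%d (was %d), OpenFileDescriptor=%d (was %d)%n",
            after.threads(), before.threads(), after.openFds(), before.openFds());
        if (after.openFds() > fdWarnLimit) {  // e.g. 1412 > 1024 in the log above
            System.out.printf("WARN OpenFileDescriptor=%d is superior to %d%n",
                after.openFds(), fdWarnLimit);
        }
    }
}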
2024-11-10T10:32:29,927 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T10:32:29,927 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T10:32:29,928 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T10:32:29,928 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
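The "Call stack:" block above is deliberate diagnostic output: the connection records the stack trace of whoever closed it, so stray or double closes can be traced afterwards. A tiny sketch of that pattern, not the AsyncConnectionImpl code itself:

// Hedged sketch: capture and log the caller's stack at close time.
final class CloseStackSketch {
    static void logCloseStack() {
        StringBuilder sb = new StringBuilder("Call stack:");
        for (StackTraceElement e : Thread.currentThread().getStackTrace()) {
            sb.append("\n  at ").append(e);
        }
        System.out.println(sb);   // emitted only when DEBUG logging is enabled
    }
}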
2024-11-10T10:32:29,928 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-10T10:32:29,928 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1021476038, stopped=false 2024-11-10T10:32:29,929 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=eccb6f743e07,38215,1731234710743 2024-11-10T10:32:29,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T10:32:29,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T10:32:29,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T10:32:29,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-10T10:32:29,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T10:32:29,931 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-10T10:32:29,931 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-10T10:32:29,931 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
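Shutdown is broadcast through ZooKeeper: deleting /hbase/running fires a NodeDeleted event on every watcher, and each server treats that as the stop signal; the "Set watcher on znode that does not yet exist" lines that follow are the watch being re-armed. A minimal sketch with the plain ZooKeeper client, reusing the quorum address from the log; error handling and session management are omitted.

// Hedged sketch of the shutdown signal via the running znode.
import org.apache.zookeeper.*;

final class RunningNodeWatcherSketch {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57026", 30_000, null);
        // exists() registers the watch even when the znode is absent, matching
        // the "Set watcher on znode that does not yet exist" lines above.
        zk.exists("/hbase/running", event -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                System.out.println("shutdown requested: " + event.getPath());
            }
        });
        Thread.sleep(Long.MAX_VALUE);   // keep the session alive for the demo
    }
}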
2024-11-10T10:32:29,931 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T10:32:29,931 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T10:32:29,932 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'eccb6f743e07,44191,1731234711534' ***** 2024-11-10T10:32:29,932 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T10:32:29,932 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T10:32:29,932 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'eccb6f743e07,39867,1731234711581' ***** 2024-11-10T10:32:29,932 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-10T10:32:29,932 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T10:32:29,932 INFO 
[RS:1;eccb6f743e07:44191 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-10T10:32:29,932 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T10:32:29,932 INFO [RS:1;eccb6f743e07:44191 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T10:32:29,932 INFO [RS:2;eccb6f743e07:39867 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-10T10:32:29,932 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T10:32:29,932 INFO [RS:1;eccb6f743e07:44191 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T10:32:29,932 INFO [RS:2;eccb6f743e07:39867 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-10T10:32:29,933 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(3091): Received CLOSE for 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:29,933 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(959): stopping server eccb6f743e07,39867,1731234711581 2024-11-10T10:32:29,933 INFO [RS:2;eccb6f743e07:39867 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T10:32:29,933 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(959): stopping server eccb6f743e07,44191,1731234711534 2024-11-10T10:32:29,933 INFO [RS:1;eccb6f743e07:44191 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T10:32:29,933 INFO [RS:2;eccb6f743e07:39867 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;eccb6f743e07:39867. 2024-11-10T10:32:29,933 INFO [RS:1;eccb6f743e07:44191 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;eccb6f743e07:44191. 
2024-11-10T10:32:29,933 DEBUG [RS:2;eccb6f743e07:39867 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T10:32:29,933 DEBUG [RS:2;eccb6f743e07:39867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T10:32:29,933 DEBUG [RS:1;eccb6f743e07:44191 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-10T10:32:29,933 DEBUG [RS:1;eccb6f743e07:44191 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T10:32:29,933 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 298cceebf7d48f429a5ffead936f01b2, disabling compactions & flushes 2024-11-10T10:32:29,933 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T10:32:29,933 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-11-10T10:32:29,933 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:29,933 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T10:32:29,933 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T10:32:29,933 DEBUG [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(1325): Online Regions={298cceebf7d48f429a5ffead936f01b2=testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2.} 2024-11-10T10:32:29,933 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:29,933 DEBUG [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(1351): Waiting on 298cceebf7d48f429a5ffead936f01b2 2024-11-10T10:32:29,933 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. after waiting 0 ms 2024-11-10T10:32:29,933 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-10T10:32:29,933 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 
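The "Waiting on N regions to close" lines come from a drain loop: the server keeps polling its online-regions map until the close handlers have removed every entry, then declares "all regions closed". A simplified sketch of that loop:

// Hedged sketch of the region drain loop, not the HRegionServer code.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class RegionDrainSketch {
    private final Map<String, Object> onlineRegions = new ConcurrentHashMap<>();

    void waitOnAllRegionsToClose() throws InterruptedException {
        while (!onlineRegions.isEmpty()) {
            System.out.printf("Waiting on %d regions to close: %s%n",
                onlineRegions.size(), onlineRegions.keySet());
            Thread.sleep(100);   // entries vanish as CloseRegionHandler finishes
        }
        System.out.println("all regions closed.");
    }
}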
2024-11-10T10:32:29,933 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-10T10:32:29,933 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-10T10:32:29,935 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-10T10:32:29,935 DEBUG [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-10T10:32:29,935 DEBUG [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-10T10:32:29,935 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-10T10:32:29,935 INFO [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-10T10:32:29,935 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-10T10:32:29,935 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-10T10:32:29,935 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-10T10:32:29,935 INFO [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=6.86 KB heapSize=11.45 KB 2024-11-10T10:32:29,939 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/default/testReplayEditsAfterRegionMovedWithMultiCF/298cceebf7d48f429a5ffead936f01b2/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=17 2024-11-10T10:32:29,940 INFO [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 2024-11-10T10:32:29,940 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 298cceebf7d48f429a5ffead936f01b2: Waiting for close lock at 1731234749933Running coprocessor pre-close hooks at 1731234749933Disabling compacts and flushes for region at 1731234749933Disabling writes for close at 1731234749933Writing region close event to WAL at 1731234749935 (+2 ms)Running coprocessor post-close hooks at 1731234749940 (+5 ms)Closed at 1731234749940 2024-11-10T10:32:29,940 DEBUG [RS_CLOSE_REGION-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2. 
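The close journals above show a fixed ordering: take the close lock, disable further updates, flush or discard what remains, write the close event to the WAL, and record the final sequence id as a recovered.edits/<seqid>.seqid marker (20.seqid for this region). A condensed sketch of that ordering follows, with the lock and WAL reduced to stand-ins.

// Hedged sketch of the close ordering visible in the journal lines above.
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class RegionCloseSketch {
    private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
    private volatile boolean writesDisabled;

    void close() {
        closeLock.writeLock().lock();   // "Acquired close lock ... after waiting 0 ms"
        try {
            writesDisabled = true;      // "Updates disabled for region"
            // flush remaining memstore data if any, then:
            writeCloseEventToWal();     // "Writing region close event to WAL"
            writeMaxSeqIdMarker(20);    // recovered.edits/20.seqid in the log
        } finally {
            closeLock.writeLock().unlock();
        }
    }

    private void writeCloseEventToWal() { /* append a region-close marker entry */ }
    private void writeMaxSeqIdMarker(long seqId) { /* recovered.edits/<seqId>.seqid */ }
}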
2024-11-10T10:32:29,957 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/.tmp/info/f48826fb070642948e7e78266a161140 is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1731234726494.298cceebf7d48f429a5ffead936f01b2./info:regioninfo/1731234729590/Put/seqid=0 2024-11-10T10:32:29,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741938_1118 (size=8243) 2024-11-10T10:32:29,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741938_1118 (size=8243) 2024-11-10T10:32:29,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741938_1118 (size=8243) 2024-11-10T10:32:29,964 INFO [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.65 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/.tmp/info/f48826fb070642948e7e78266a161140 2024-11-10T10:32:29,983 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/.tmp/ns/85348cc826174fa2840fd71e1998931c is 43, key is default/ns:d/1731234713649/Put/seqid=0 2024-11-10T10:32:29,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741939_1119 (size=5153) 2024-11-10T10:32:29,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741939_1119 (size=5153) 2024-11-10T10:32:29,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741939_1119 (size=5153) 2024-11-10T10:32:29,993 INFO [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/.tmp/ns/85348cc826174fa2840fd71e1998931c 2024-11-10T10:32:30,012 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/.tmp/table/bf1f1005706746f1bcca4eba765c4a2e is 78, key is testReplayEditsAfterRegionMovedWithMultiCF/table:state/1731234726919/Put/seqid=0 2024-11-10T10:32:30,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741940_1120 (size=5431) 2024-11-10T10:32:30,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741940_1120 (size=5431) 2024-11-10T10:32:30,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741940_1120 (size=5431) 
2024-11-10T10:32:30,019 INFO [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=148 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/.tmp/table/bf1f1005706746f1bcca4eba765c4a2e 2024-11-10T10:32:30,023 INFO [regionserver/eccb6f743e07:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T10:32:30,024 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/.tmp/info/f48826fb070642948e7e78266a161140 as hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/info/f48826fb070642948e7e78266a161140 2024-11-10T10:32:30,027 INFO [regionserver/eccb6f743e07:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T10:32:30,029 INFO [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/info/f48826fb070642948e7e78266a161140, entries=18, sequenceid=21, filesize=8.0 K 2024-11-10T10:32:30,030 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/.tmp/ns/85348cc826174fa2840fd71e1998931c as hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/ns/85348cc826174fa2840fd71e1998931c 2024-11-10T10:32:30,034 INFO [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/ns/85348cc826174fa2840fd71e1998931c, entries=2, sequenceid=21, filesize=5.0 K 2024-11-10T10:32:30,035 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/.tmp/table/bf1f1005706746f1bcca4eba765c4a2e as hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/table/bf1f1005706746f1bcca4eba765c4a2e 2024-11-10T10:32:30,039 INFO [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/table/bf1f1005706746f1bcca4eba765c4a2e, entries=2, sequenceid=21, filesize=5.3 K 2024-11-10T10:32:30,040 INFO [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 105ms, sequenceid=21, compaction requested=false 2024-11-10T10:32:30,045 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-10T10:32:30,045 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-10T10:32:30,046 INFO [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-10T10:32:30,046 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731234749935Running coprocessor pre-close hooks at 1731234749935Disabling compacts and flushes for region at 1731234749935Disabling writes for close at 1731234749935Obtaining lock to block concurrent updates at 1731234749935Preparing flush snapshotting stores in 1588230740 at 1731234749935Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=7029, getHeapSize=11664, getOffHeapSize=0, getCellsCount=48 at 1731234749936 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731234749937 (+1 ms)Flushing 1588230740/info: creating writer at 1731234749937Flushing 1588230740/info: appending metadata at 1731234749957 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731234749957Flushing 1588230740/ns: creating writer at 1731234749969 (+12 ms)Flushing 1588230740/ns: appending metadata at 1731234749983 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731234749983Flushing 1588230740/table: creating writer at 1731234749998 (+15 ms)Flushing 1588230740/table: appending metadata at 1731234750011 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731234750011Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35102f8e: reopening flushed file at 1731234750023 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4701808b: reopening flushed file at 1731234750029 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56ebd53a: reopening flushed file at 1731234750034 (+5 ms)Finished flush of dataSize ~6.86 KB/7029, heapSize ~11.16 KB/11424, currentSize=0 B/0 for 1588230740 in 105ms, sequenceid=21, compaction requested=false at 1731234750040 (+6 ms)Writing region close event to WAL at 1731234750042 (+2 ms)Running coprocessor post-close hooks at 1731234750045 (+3 ms)Closed at 1731234750046 (+1 ms) 2024-11-10T10:32:30,046 DEBUG [RS_CLOSE_META-regionserver/eccb6f743e07:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-10T10:32:30,133 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(976): stopping server eccb6f743e07,44191,1731234711534; all regions closed. 2024-11-10T10:32:30,135 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(976): stopping server eccb6f743e07,39867,1731234711581; all regions closed. 
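The "Committing ... .tmp/... as ..." lines above are the flush commit: each HFile is first written under the region's .tmp directory and then renamed into the column family directory, so readers never observe a half-written store file. Below is a sketch of that commit step with the Hadoop FileSystem API; the paths are illustrative and the real logic lives in HRegionFileSystem.

// Hedged sketch of the write-to-tmp-then-rename flush commit.
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class FlushCommitSketch {
    static Path commitStoreFile(FileSystem fs, Path regionDir,
                                String family, Path tmpFile) throws IOException {
        Path dest = new Path(new Path(regionDir, family), tmpFile.getName());
        // The rename is the "commit"; the file becomes visible in one step.
        if (!fs.rename(tmpFile, dest)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + dest);
        }
        return dest;
    }
}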
2024-11-10T10:32:30,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741834_1010 (size=2180) 2024-11-10T10:32:30,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741834_1010 (size=2180) 2024-11-10T10:32:30,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741834_1010 (size=2180) 2024-11-10T10:32:30,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741836_1012 (size=4715) 2024-11-10T10:32:30,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741836_1012 (size=4715) 2024-11-10T10:32:30,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741836_1012 (size=4715) 2024-11-10T10:32:30,139 DEBUG [RS:1;eccb6f743e07:44191 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/oldWALs 2024-11-10T10:32:30,139 INFO [RS:1;eccb6f743e07:44191 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL eccb6f743e07%2C44191%2C1731234711534:(num 1731234713068) 2024-11-10T10:32:30,139 DEBUG [RS:1;eccb6f743e07:44191 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T10:32:30,139 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T10:32:30,140 INFO [RS:1;eccb6f743e07:44191 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T10:32:30,140 INFO [RS:1;eccb6f743e07:44191 {}] hbase.ChoreService(370): Chore service for: regionserver/eccb6f743e07:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-10T10:32:30,140 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-10T10:32:30,140 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-10T10:32:30,140 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-10T10:32:30,140 INFO [regionserver/eccb6f743e07:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-10T10:32:30,140 INFO [RS:1;eccb6f743e07:44191 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T10:32:30,140 INFO [RS:1;eccb6f743e07:44191 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44191 2024-11-10T10:32:30,140 DEBUG [RS:2;eccb6f743e07:39867 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/oldWALs 2024-11-10T10:32:30,141 INFO [RS:2;eccb6f743e07:39867 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL eccb6f743e07%2C39867%2C1731234711581.meta:.meta(num 1731234713470) 2024-11-10T10:32:30,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T10:32:30,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/eccb6f743e07,44191,1731234711534 2024-11-10T10:32:30,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741835_1011 (size=95) 2024-11-10T10:32:30,143 INFO [RS:1;eccb6f743e07:44191 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T10:32:30,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741835_1011 (size=95) 2024-11-10T10:32:30,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741835_1011 (size=95) 2024-11-10T10:32:30,144 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [eccb6f743e07,44191,1731234711534] 2024-11-10T10:32:30,145 DEBUG [RS:2;eccb6f743e07:39867 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/oldWALs 2024-11-10T10:32:30,145 INFO [RS:2;eccb6f743e07:39867 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL eccb6f743e07%2C39867%2C1731234711581:(num 1731234713068) 2024-11-10T10:32:30,145 DEBUG [RS:2;eccb6f743e07:39867 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-10T10:32:30,145 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.LeaseManager(133): Closed leases 2024-11-10T10:32:30,145 INFO [RS:2;eccb6f743e07:39867 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T10:32:30,146 INFO [RS:2;eccb6f743e07:39867 {}] hbase.ChoreService(370): Chore service for: regionserver/eccb6f743e07:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-10T10:32:30,146 INFO [RS:2;eccb6f743e07:39867 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-10T10:32:30,146 INFO [regionserver/eccb6f743e07:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
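The chore lists printed "on shutdown" above reflect HBase's chore pattern: periodic maintenance tasks scheduled on a shared service and cancelled together when the server stops. A bare-bones sketch using a plain ScheduledExecutorService, not the ChoreService class itself:

// Hedged sketch of periodic chores cancelled en masse at shutdown.
import java.util.concurrent.*;

final class ChoreServiceSketch {
    private final ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);

    ScheduledFuture<?> schedule(String name, Runnable task, long periodMillis) {
        System.out.printf("ScheduledChore name=%s, period=%d, unit=MILLISECONDS%n",
            name, periodMillis);
        return pool.scheduleAtFixedRate(task, periodMillis, periodMillis,
            TimeUnit.MILLISECONDS);
    }

    void shutdown() {
        pool.shutdownNow();   // pending chores are the ones reported "on shutdown"
    }
}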
2024-11-10T10:32:30,146 INFO [RS:2;eccb6f743e07:39867 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39867 2024-11-10T10:32:30,147 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/eccb6f743e07,44191,1731234711534 already deleted, retry=false 2024-11-10T10:32:30,147 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; eccb6f743e07,44191,1731234711534 expired; onlineServers=1 2024-11-10T10:32:30,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-10T10:32:30,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/eccb6f743e07,39867,1731234711581 2024-11-10T10:32:30,148 INFO [RS:2;eccb6f743e07:39867 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-10T10:32:30,149 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [eccb6f743e07,39867,1731234711581] 2024-11-10T10:32:30,151 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/eccb6f743e07,39867,1731234711581 already deleted, retry=false 2024-11-10T10:32:30,151 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; eccb6f743e07,39867,1731234711581 expired; onlineServers=0 2024-11-10T10:32:30,151 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'eccb6f743e07,38215,1731234710743' ***** 2024-11-10T10:32:30,151 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-10T10:32:30,151 INFO [M:0;eccb6f743e07:38215 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-10T10:32:30,151 INFO [M:0;eccb6f743e07:38215 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-10T10:32:30,151 DEBUG [M:0;eccb6f743e07:38215 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-10T10:32:30,152 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
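The expiration lines above are simple bookkeeping: each deleted region-server znode decrements the online-server count, and once it reaches zero the master itself is stopped. A sketch of that countdown, assuming the two region servers seen in this run:

// Hedged sketch of the onlineServers countdown driving master shutdown.
import java.util.concurrent.atomic.AtomicInteger;

final class ServerExpirationSketch {
    private final AtomicInteger onlineServers = new AtomicInteger(2);

    void expireServer(String serverName) {
        int left = onlineServers.decrementAndGet();
        System.out.printf("Cluster shutdown set; %s expired; onlineServers=%d%n",
            serverName, left);
        if (left == 0) {
            System.out.println("***** STOPPING master *****");
        }
    }
}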
2024-11-10T10:32:30,152 DEBUG [M:0;eccb6f743e07:38215 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-10T10:32:30,152 DEBUG [master/eccb6f743e07:0:becomeActiveMaster-HFileCleaner.large.0-1731234712658 {}] cleaner.HFileCleaner(306): Exit Thread[master/eccb6f743e07:0:becomeActiveMaster-HFileCleaner.large.0-1731234712658,5,FailOnTimeoutGroup]
2024-11-10T10:32:30,152 DEBUG [master/eccb6f743e07:0:becomeActiveMaster-HFileCleaner.small.0-1731234712660 {}] cleaner.HFileCleaner(306): Exit Thread[master/eccb6f743e07:0:becomeActiveMaster-HFileCleaner.small.0-1731234712660,5,FailOnTimeoutGroup]
2024-11-10T10:32:30,152 INFO [M:0;eccb6f743e07:38215 {}] hbase.ChoreService(370): Chore service for: master/eccb6f743e07:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-10T10:32:30,152 INFO [M:0;eccb6f743e07:38215 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-10T10:32:30,152 DEBUG [M:0;eccb6f743e07:38215 {}] master.HMaster(1795): Stopping service threads
2024-11-10T10:32:30,152 INFO [M:0;eccb6f743e07:38215 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-10T10:32:30,152 INFO [M:0;eccb6f743e07:38215 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-10T10:32:30,153 INFO [M:0;eccb6f743e07:38215 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-10T10:32:30,153 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-10T10:32:30,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-10T10:32:30,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-10T10:32:30,155 DEBUG [M:0;eccb6f743e07:38215 {}] zookeeper.ZKUtil(347): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-10T10:32:30,155 WARN [M:0;eccb6f743e07:38215 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-10T10:32:30,155 INFO [M:0;eccb6f743e07:38215 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/.lastflushedseqids
2024-11-10T10:32:30,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741941_1121 (size=138)
2024-11-10T10:32:30,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741941_1121 (size=138)
2024-11-10T10:32:30,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741941_1121 (size=138)
2024-11-10T10:32:30,168 INFO [M:0;eccb6f743e07:38215 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-10T10:32:30,168 INFO [M:0;eccb6f743e07:38215 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-10T10:32:30,168 DEBUG [M:0;eccb6f743e07:38215 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-10T10:32:30,168 INFO [M:0;eccb6f743e07:38215 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T10:32:30,168 DEBUG [M:0;eccb6f743e07:38215 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T10:32:30,168 DEBUG [M:0;eccb6f743e07:38215 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-10T10:32:30,168 DEBUG [M:0;eccb6f743e07:38215 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T10:32:30,168 INFO [M:0;eccb6f743e07:38215 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=68.33 KB heapSize=83.72 KB
2024-11-10T10:32:30,183 DEBUG [M:0;eccb6f743e07:38215 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0340b03d673640b7b789c25cbaf5ff2b is 82, key is hbase:meta,,1/info:regioninfo/1731234713562/Put/seqid=0
2024-11-10T10:32:30,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741942_1122 (size=5672)
2024-11-10T10:32:30,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741942_1122 (size=5672)
2024-11-10T10:32:30,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741942_1122 (size=5672)
2024-11-10T10:32:30,189 INFO [M:0;eccb6f743e07:38215 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0340b03d673640b7b789c25cbaf5ff2b
2024-11-10T10:32:30,208 DEBUG [M:0;eccb6f743e07:38215 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72270f790ad44d1e9014f1a14398a73c is 1075, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731234726924/Put/seqid=0
2024-11-10T10:32:30,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741943_1123 (size=7754)
2024-11-10T10:32:30,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741943_1123 (size=7754)
2024-11-10T10:32:30,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741943_1123 (size=7754)
2024-11-10T10:32:30,218 INFO [M:0;eccb6f743e07:38215 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.60 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72270f790ad44d1e9014f1a14398a73c
2024-11-10T10:32:30,222 INFO [M:0;eccb6f743e07:38215 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 72270f790ad44d1e9014f1a14398a73c
2024-11-10T10:32:30,237 DEBUG [M:0;eccb6f743e07:38215 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/20c6317b2a8545889c5fdf49db8a0ccb is 69, key is eccb6f743e07,39867,1731234711581/rs:state/1731234712785/Put/seqid=0
2024-11-10T10:32:30,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741944_1124 (size=5440)
2024-11-10T10:32:30,245 INFO [RS:1;eccb6f743e07:44191 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-10T10:32:30,245 INFO [RS:1;eccb6f743e07:44191 {}] regionserver.HRegionServer(1031): Exiting; stopping=eccb6f743e07,44191,1731234711534; zookeeper connection closed.
2024-11-10T10:32:30,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T10:32:30,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44191-0x101016f2c550002, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T10:32:30,246 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3c0c4061 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3c0c4061
2024-11-10T10:32:30,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741944_1124 (size=5440)
2024-11-10T10:32:30,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741944_1124 (size=5440)
2024-11-10T10:32:30,247 INFO [M:0;eccb6f743e07:38215 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=249 B at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/20c6317b2a8545889c5fdf49db8a0ccb
2024-11-10T10:32:30,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T10:32:30,250 INFO [RS:2;eccb6f743e07:39867 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-10T10:32:30,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39867-0x101016f2c550003, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T10:32:30,250 INFO [RS:2;eccb6f743e07:39867 {}] regionserver.HRegionServer(1031): Exiting; stopping=eccb6f743e07,39867,1731234711581; zookeeper connection closed.
2024-11-10T10:32:30,250 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6873d097 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6873d097
2024-11-10T10:32:30,250 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-10T10:32:30,251 INFO [M:0;eccb6f743e07:38215 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 20c6317b2a8545889c5fdf49db8a0ccb
2024-11-10T10:32:30,252 DEBUG [M:0;eccb6f743e07:38215 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0340b03d673640b7b789c25cbaf5ff2b as hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0340b03d673640b7b789c25cbaf5ff2b
2024-11-10T10:32:30,257 INFO [M:0;eccb6f743e07:38215 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0340b03d673640b7b789c25cbaf5ff2b, entries=8, sequenceid=168, filesize=5.5 K
2024-11-10T10:32:30,258 DEBUG [M:0;eccb6f743e07:38215 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72270f790ad44d1e9014f1a14398a73c as hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/72270f790ad44d1e9014f1a14398a73c
2024-11-10T10:32:30,262 INFO [M:0;eccb6f743e07:38215 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 72270f790ad44d1e9014f1a14398a73c
2024-11-10T10:32:30,262 INFO [M:0;eccb6f743e07:38215 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/72270f790ad44d1e9014f1a14398a73c, entries=17, sequenceid=168, filesize=7.6 K
2024-11-10T10:32:30,263 DEBUG [M:0;eccb6f743e07:38215 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/20c6317b2a8545889c5fdf49db8a0ccb as hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/20c6317b2a8545889c5fdf49db8a0ccb
2024-11-10T10:32:30,268 INFO [M:0;eccb6f743e07:38215 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 20c6317b2a8545889c5fdf49db8a0ccb
2024-11-10T10:32:30,268 INFO [M:0;eccb6f743e07:38215 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39939/user/jenkins/test-data/12c52112-6087-7869-515a-a4f0103ccacf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/20c6317b2a8545889c5fdf49db8a0ccb, entries=3, sequenceid=168, filesize=5.3 K
2024-11-10T10:32:30,269 INFO [M:0;eccb6f743e07:38215 {}] regionserver.HRegion(3140): Finished flush of dataSize ~68.33 KB/69972, heapSize ~83.42 KB/85424, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 101ms, sequenceid=168, compaction requested=false
2024-11-10T10:32:30,270 INFO [M:0;eccb6f743e07:38215 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-10T10:32:30,270 DEBUG [M:0;eccb6f743e07:38215 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731234750168Disabling compacts and flushes for region at 1731234750168Disabling writes for close at 1731234750168Obtaining lock to block concurrent updates at 1731234750168Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731234750168Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=69972, getHeapSize=85664, getOffHeapSize=0, getCellsCount=195 at 1731234750169 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731234750169Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731234750169Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731234750183 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731234750183Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731234750194 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731234750207 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731234750207Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731234750222 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731234750236 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731234750236Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fd3c8a9: reopening flushed file at 1731234750251 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f3b4cf9: reopening flushed file at 1731234750257 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63a8623d: reopening flushed file at 1731234750263 (+6 ms)Finished flush of dataSize ~68.33 KB/69972, heapSize ~83.42 KB/85424, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 101ms, sequenceid=168, compaction requested=false at 1731234750269 (+6 ms)Writing region close event to WAL at 1731234750270 (+1 ms)Closed at 1731234750270
2024-11-10T10:32:30,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741830_1006 (size=56344)
2024-11-10T10:32:30,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44741 is added to blk_1073741830_1006 (size=56344)
2024-11-10T10:32:30,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44251 is added to blk_1073741830_1006 (size=56344)
2024-11-10T10:32:30,274 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-10T10:32:30,274 INFO [M:0;eccb6f743e07:38215 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
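The flush above follows the write-to-temp-then-rename idiom: each column family's memstore is written to a file under the region's .tmp directory (the three addStoredBlock lines per file are the three datanodes acknowledging replicas), and only a completed file is moved into the store directory (the "Committing ... as ..." lines), so readers never observe a partial HFile. Below is a rough sketch of the idiom with the plain Hadoop FileSystem API, using made-up paths and payload rather than HBase's HRegionFileSystem code.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative only: publish a file by writing under .tmp and renaming.
    public class TmpCommitSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/demo/region/.tmp/flushfile");       // hypothetical layout
        Path committed = new Path("/demo/region/info/flushfile"); // hypothetical layout

        // 1. Write the whole file under .tmp; a crash here leaves only temp garbage.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.writeBytes("flushed cells would go here"); // stand-in for HFile bytes
        }

        // 2. Rename into the store directory; within one HDFS namespace the rename
        //    is atomic, so readers see either no file or the complete file.
        fs.mkdirs(committed.getParent());
        if (!fs.rename(tmp, committed)) {
          throw new IOException("failed to commit " + tmp + " as " + committed);
        }
      }
    }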
2024-11-10T10:32:30,274 INFO [M:0;eccb6f743e07:38215 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38215
2024-11-10T10:32:30,274 INFO [M:0;eccb6f743e07:38215 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-10T10:32:30,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T10:32:30,376 INFO [M:0;eccb6f743e07:38215 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-10T10:32:30,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38215-0x101016f2c550000, quorum=127.0.0.1:57026, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-10T10:32:30,383 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 0
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:32:30,484 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 1
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    [stack trace identical to retry = 0]
2024-11-10T10:32:30,686 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 2
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    [stack trace identical to retry = 0]
2024-11-10T10:32:30,988 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 3
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    [stack trace identical to retry = 0]
2024-11-10T10:32:31,162 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-10T10:32:31,162 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-10T10:32:31,162 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-10T10:32:31,162 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF
2024-11-10T10:32:31,493 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 4
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    [stack trace identical to retry = 0]
2024-11-10T10:32:32,499 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 5
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    [stack trace identical to retry = 0; log truncated here]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-10T10:32:34,506 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 6
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [identical stack trace omitted]
2024-11-10T10:32:35,442 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-10T10:32:38,539 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 7
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [identical stack trace omitted]
2024-11-10T10:32:48,604 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 8
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [identical stack trace omitted]
2024-11-10T10:32:49,899 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:32:58,657 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 9
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [identical stack trace omitted]
2024-11-10T10:33:08,695 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 10
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [identical stack trace omitted]
2024-11-10T10:33:18,732 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 11
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [identical stack trace omitted]
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T10:33:19,900 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-10T10:33:38,835 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 12 org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files. at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T10:33:49,900 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T10:33:59,002 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 13 org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T10:34:19,047 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 14 org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files. at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T10:34:19,900 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-10T10:34:39,108 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 15 org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files. at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T10:34:49,900 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-10T10:34:59,126 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 16 org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:35:19,131 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 17
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:35:19,900 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:35:39,234 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 18
    ... (org.apache.hadoop.ipc.RemoteException and stack trace identical to retry = 17 above)
2024-11-10T10:35:49,901 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:35:59,279 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 19
    ... (org.apache.hadoop.ipc.RemoteException and stack trace identical to retry = 17 above)
2024-11-10T10:36:19,450 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 20
    ... (org.apache.hadoop.ipc.RemoteException and stack trace identical to retry = 17 above)
2024-11-10T10:36:19,901 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:36:39,478 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 21
    ... (org.apache.hadoop.ipc.RemoteException and stack trace identical to retry = 17 above)
2024-11-10T10:36:49,901 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:36:51,669 DEBUG [master/eccb6f743e07:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=9, created chunk count=17, reused chunk count=34, reuseRatio=66.67%
2024-11-10T10:36:51,669 DEBUG [master/eccb6f743e07:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-11-10T10:36:59,024 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-10T10:36:59,555 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 22
    ... (org.apache.hadoop.ipc.RemoteException and stack trace identical to retry = 17 above)
2024-11-10T10:37:19,575 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 23
    ... (org.apache.hadoop.ipc.RemoteException and stack trace identical to retry = 17 above)
2024-11-10T10:37:19,901 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:37:39,582 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 24
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    [... the preceding five reflection/proxy frames repeat five more times ...]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:37:49,902 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
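[Annotation] The WARN above is emitted once per attempt from FanOutOneBlockAsyncDFSOutputHelper.completeFile, which keeps asking the namenode to finalize the WAL file during MiniDFSCluster shutdown and logs `failed, retry = N` each time the call throws. Since the namenode reports the lease holder has no open files, no amount of retrying can succeed. A minimal sketch of this retry-and-log shape; the `CompleteCall` interface and the fixed pause are illustrative assumptions, not the actual HBase implementation:

```java
import java.util.concurrent.TimeUnit;

public class CompleteFileRetry {
    /** Stand-in for the RPC that asks the namenode to finalize a file. */
    @FunctionalInterface
    interface CompleteCall {
        boolean complete() throws Exception; // true once the namenode accepts
    }

    /** Retry complete() with a fixed pause, logging each failure like the WARNs above. */
    static void completeFile(String path, CompleteCall call, long pauseMs) throws InterruptedException {
        for (int retry = 0; ; retry++) {
            try {
                if (call.complete()) {
                    return; // file finalized, done
                }
            } catch (Exception e) {
                System.out.printf("complete file %s failed, retry = %d%n", path, retry);
            }
            TimeUnit.MILLISECONDS.sleep(pauseMs); // observed spacing in this log is ~20 s
        }
    }
}
```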
2024-11-10T10:37:59,643 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 25
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    [... stack trace identical to the retry = 24 trace above ...]
2024-11-10T10:38:19,667 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 26
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    [... stack trace identical to the retry = 24 trace above ...]
2024-11-10T10:38:19,902 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
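[Annotation] The server-side half of each trace (the frames without jar suffixes, ending at Server$Handler.run) shows why the call can never succeed: FSNamesystem.checkLease finds that holder DFSClient_NONMAPREDUCE_-1215601723_22 holds no lease on inode 16655, so the complete() request is rejected before the file could be finalized. Roughly, that check behaves like the sketch below, where the lease table and exception type are deliberate simplifications, not HDFS's real data structures:

```java
import java.util.Map;
import java.util.Set;

public class LeaseCheckSketch {
    /** holder (client name) -> inode ids of files that client has open for write */
    private final Map<String, Set<Long>> leases;

    LeaseCheckSketch(Map<String, Set<Long>> leases) {
        this.leases = leases;
    }

    /** Mirrors the failure mode in the trace: no lease entry for the holder at all. */
    void checkLease(String src, long inodeId, String holder) {
        Set<Long> open = leases.get(holder);
        if (open == null || open.isEmpty()) {
            throw new IllegalStateException("File does not exist: " + src + " (inode " + inodeId
                + ") Holder " + holder + " does not have any open files.");
        }
        if (!open.contains(inodeId)) {
            throw new IllegalStateException("Holder " + holder + " does not hold the lease on " + src);
        }
    }
}
```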
2024-11-10T10:38:39,724 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 27
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    [... stack trace identical to the retry = 24 trace above ...]
2024-11-10T10:38:49,902 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:38:59,922 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 28
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    [... stack trace identical to the retry = 24 trace above ...]
2024-11-10T10:39:19,902 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:39:20,086 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 29
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    [... stack trace identical to the retry = 24 trace above ...]
2024-11-10T10:39:40,116 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 30
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    [... stack trace identical to the retry = 24 trace above ...]
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-10T10:39:49,902 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
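The WARN/RemoteException pair above repeats for the rest of this excerpt with only the timestamp and retry counter changing: the NameNode has already released the lease on wal.1731234730231 ("does not have any open files"), so every complete() RPC fails the checkLease() check, and the close path retries on a roughly 20-second cadence (retry = 30 at 10:39:40, retry = 31 at 10:40:00, and so on). A minimal Java sketch of a retry loop with that shape follows; completeWithRetries, NameNodeClient, and RETRY_INTERVAL_MS are illustrative names for this sketch, not the actual FanOutOneBlockAsyncDFSOutputHelper implementation.

// Illustrative sketch of a retry-on-failure close loop, NOT the real HBase code.
// Assumed names: completeWithRetries, NameNodeClient. The log above shows this
// shape: one WARN per failed attempt, retry counter incrementing, ~20 s apart.
import java.io.IOException;
import java.util.concurrent.TimeUnit;

final class CompleteFileRetryExample {
  private static final long RETRY_INTERVAL_MS = TimeUnit.SECONDS.toMillis(20);

  /** Keeps calling complete() until the NameNode acknowledges the close. */
  static void completeWithRetries(NameNodeClient client, String path)
      throws InterruptedException {
    for (int retry = 0; ; retry++) {
      try {
        if (client.complete(path)) {
          return; // file successfully closed on the NameNode
        }
      } catch (IOException e) {
        // Matches the pattern in the log: warn with the retry count, then wait.
        System.err.printf("complete file %s failed, retry = %d%n", path, retry);
      }
      Thread.sleep(RETRY_INTERVAL_MS);
    }
  }

  /** Hypothetical client interface standing in for the HDFS RPC proxy. */
  interface NameNodeClient {
    boolean complete(String path) throws IOException;
  }
}

With no retry cap, a file the NameNode will never acknowledge keeps the loop (and the teardown that called it) spinning indefinitely, which is exactly the behavior recorded below.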
2024-11-10T10:40:00,236 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 31
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [stack trace identical to retry = 30 above]
2024-11-10T10:40:19,903 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:40:20,248 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 32
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [stack trace identical to retry = 30 above]
2024-11-10T10:40:40,393 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 33
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [stack trace identical to retry = 30 above]
2024-11-10T10:40:49,903 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
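The periodic FsDatasetAsyncDiskServiceFixer DEBUG line records a reflective lookup of a private threadGroup field that newer Hadoop releases no longer have; per HBASE-27595, the fixer treats the missing field as benign and skips its workaround instead of failing. The underlying pattern, reading a private field reflectively and downgrading NoSuchFieldException to a debug message, is sketched below with placeholder names (FixerSketch, readThreadGroup); this is not the actual HBaseTestingUtil code.

// Sketch of tolerating a missing private field across library versions.
// Placeholder names: FixerSketch, target. Mirrors only the pattern implied
// by the DEBUG line above, not the real HBaseTestingUtil implementation.
import java.lang.reflect.Field;

final class FixerSketch {
  static Object readThreadGroup(Object target) {
    try {
      Field f = target.getClass().getDeclaredField("threadGroup");
      f.setAccessible(true);
      return f.get(target);
    } catch (NoSuchFieldException e) {
      // Field removed in newer Hadoop versions: log and carry on, don't fail.
      System.out.println("NoSuchFieldException: threadGroup; "
          + "field absent in this Hadoop version, skipping fix");
      return null;
    } catch (IllegalAccessException e) {
      throw new IllegalStateException(e);
    }
  }
}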
2024-11-10T10:41:00,406 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 34
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [stack trace identical to retry = 30 above]
2024-11-10T10:41:19,903 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:41:20,551 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 35
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [stack trace identical to retry = 30 above]
2024-11-10T10:41:40,570 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 36
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... [stack trace identical to retry = 30 above]
2024-11-10T10:41:49,903 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
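Every trace in this excerpt bottoms out in the same JUnit 4 machinery: FailOnTimeout runs the test on a "Time-limited test" thread, and RunAfters invokes the static tearDownAfterClass, which shuts the mini cluster down and thereby triggers the failing close. A skeletal version of that teardown arrangement is below; TestingCluster and its shutdownMiniCluster are stand-ins inferred from the stack frames, not the verbatim TestAsyncWALReplay code.

// Sketch of a JUnit 4 class-level teardown like the one in the traces above.
// TestingCluster/shutdownMiniCluster are assumed stand-ins for HBaseTestingUtil.
import org.junit.AfterClass;

public class WalReplayTeardownSketch {
  private static final TestingCluster UTIL = new TestingCluster();

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Shutting down the mini cluster closes the DistributedFileSystem, which
    // tries to complete any still-open WAL outputs -- the step that keeps
    // failing in this log.
    UTIL.shutdownMiniCluster();
  }

  /** Hypothetical stand-in for the HBase testing utility. */
  static class TestingCluster {
    void shutdownMiniCluster() throws Exception { /* stop DFS + HBase */ }
  }
}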
2024-11-10T10:41:51,669 DEBUG [master/eccb6f743e07:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=9, created chunk count=17, reused chunk count=34, reuseRatio=66.67%
2024-11-10T10:41:51,669 DEBUG [master/eccb6f743e07:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-11-10T10:41:59,024 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-10T10:42:00,604 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 37
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
	at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
	at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
	at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
	at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
	at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
	at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
	at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:42:19,904 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:42:20,683 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 38
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
2024-11-10T10:42:40,848 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 39
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
2024-11-10T10:42:49,904 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:43:00,878 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 40
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
2024-11-10T10:43:19,904 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:43:20,949 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 41
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
2024-11-10T10:43:41,027 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 42
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
2024-11-10T10:43:49,904 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:44:01,060 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 43
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589)
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198)
    at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?]
    at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?]
    at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-10T10:44:19,905 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-10T10:44:21,240 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 44
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... (stack trace identical to retry = 43 above)
2024-11-10T10:44:41,410 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 45
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... (stack trace identical to retry = 43 above)
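Retries 44 and 45 differ from retry 43 only in timestamp and counter: the close path keeps reissuing the namenode complete() RPC and keeps receiving the same RemoteException, so the loop never converges. A minimal sketch of that retry pattern — ClientProtocol#complete is the real HDFS RPC, while the class name, loop structure, backoff, and logging here are simplified assumptions rather than the actual FanOutOneBlockAsyncDFSOutputHelper.completeFile implementation:

import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class CompleteFileRetrySketch {
  private static final Logger LOG = LoggerFactory.getLogger(CompleteFileRetrySketch.class);

  // Keep asking the namenode to complete (close) the file until it agrees.
  static void completeFile(ClientProtocol namenode, String src, String clientName,
      ExtendedBlock last, long fileId) throws InterruptedException {
    for (int retry = 0;; retry++) {
      try {
        if (namenode.complete(src, clientName, last, fileId)) {
          return; // namenode accepted the final block; the file is now closed
        }
      } catch (IOException e) {
        // The RemoteException above ("does not have any open files") lands
        // here on every attempt, so this loop never terminates on its own.
        LOG.warn("complete file {} failed, retry = {}", src, retry, e);
      }
      // Back off before retrying; the exact policy is an assumption.
      Thread.sleep(100L + ThreadLocalRandom.current().nextLong(retry * 100L + 1));
    }
  }
}

Under this shape, a complete() call that always fails (as in this log, where the namenode no longer holds an open-file lease for the WAL) retries indefinitely, and only the enclosing JUnit FailOnTimeout rule visible in the traces eventually stops the test.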
2024-11-10T10:44:46,721 WARN [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(658): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 failed, retry = 46
org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731234729957/wal.1731234730231 (inode 16655) Holder DFSClient_NONMAPREDUCE_-1215601723_22 does not have any open files.
    ... (stack trace identical to retry = 43 above)
====> TEST TIMED OUT. PRINTING THREAD DUMP. <====
Timestamp: 2024-11-10 10:44:46,724

"IPC Server idle connection scanner for port 39939" daemon prio=5 tid=56 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Object.wait(Native Method)
    at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)

"PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741905_1083, type=LAST_IN_PIPELINE" daemon prio=5 tid=1327 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/java.lang.Object.wait(Native Method)
    at java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367)
    at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"CacheReplicationMonitor(1944735714)" daemon prio=5 tid=75 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    at app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186)

"Socket Reader #1 for port 0" daemon prio=5 tid=95 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)

"IPC Server Responder" daemon prio=5 tid=131 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)

"IPC Server handler 1 on default port 44109" daemon prio=5 tid=192 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"IPC Server handler 2 on default port 42563" daemon prio=5 tid=106 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"Socket Reader #1 for port 0" daemon prio=5 tid=55 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)

"Session-HouseKeeper-b476e79-1" prio=5 tid=157 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:41462 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027]" daemon prio=5 tid=684 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
    at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
    at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
    at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
    at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
    at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
    at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
    at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
    at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
    at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
    at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
    at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
    at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
    at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110)
    at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"pool-15-thread-1" daemon prio=5 tid=208 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"master/eccb6f743e07:0:becomeActiveMaster-MemStoreChunkPool Statistics" daemon prio=5 tid=361 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"RPCClient-NioEventLoopGroup-6-7" daemon prio=5 tid=1119 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Socket Reader #1 for port 0" daemon prio=5 tid=129 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)

"Signal Dispatcher" daemon prio=9 tid=4 runnable
java.lang.Thread.State: RUNNABLE

"ProcessThread(sid:0 cport:57026):" daemon prio=5 tid=244 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142)

"org.apache.hadoop.util.JvmPauseMonitor$Monitor@728f29c7" daemon prio=5 tid=127 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-0-hdfs://localhost:39939/hbase-prefix:default" daemon prio=5 tid=728 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data6)" daemon prio=5 tid=218 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Object.wait(Native Method)
    at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)

"MiniHBaseClusterRegionServer-EventLoopGroup-3-3" daemon prio=10 tid=1089 runnable
java.lang.Thread.State: RUNNABLE
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Block report processor" daemon prio=5 tid=51 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627)
    at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614)

"AsyncFSWAL-12-1" daemon prio=10 tid=699 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@23fc67e4" daemon prio=5 tid=61 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    at app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Finalizer" daemon prio=8 tid=3 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/java.lang.Object.wait(Native Method)
    at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    at java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172)

"AsyncFSWAL-0-hdfs://localhost:39939/hbase-prefix:default" daemon prio=5 tid=1454 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server handler 0 on default port 39939" daemon prio=5 tid=64 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"AsyncFSWAL-26-2" daemon prio=10 tid=1426 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-12-2" daemon prio=10 tid=700 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"FSEditLogAsync" daemon prio=5 tid=53 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    at app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    at app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"qtp485409569-90" daemon prio=5 tid=90 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Command processor" daemon prio=5 tid=101 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)

"VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data4)" daemon prio=5 tid=165 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Object.wait(Native Method)
    at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)

"AsyncFSWAL-20-3" daemon prio=10 tid=1186 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"region-location-0" daemon prio=5 tid=530 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"ConnnectionExpirer" daemon prio=5 tid=238 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    at app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554)

"IPC Server handler 3 on default port 42563" daemon prio=5 tid=107 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data2/current/BP-1220159422-172.17.0.2-1731234707608" daemon prio=5 tid=186 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"qtp136284002-40" daemon prio=5 tid=40 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$256/0x00007fa8604333d0.run(Unknown Source)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"RPCClient-NioEventLoopGroup-6-5" daemon prio=5 tid=1088 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-0-hdfs://localhost:39939/hbase-prefix:default" daemon prio=5 tid=1330 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"pool-26-thread-1" prio=5 tid=118 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"pool-29-thread-1" prio=5
tid=137 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-10-1" daemon prio=10 tid=619 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-17-2" daemon prio=10 tid=982 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 44109" daemon prio=5 tid=191 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "IPC Server idle connection scanner for port 41313" daemon prio=5 tid=130 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "pool-20-thread-1" prio=5 tid=103 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Monitor thread for TaskMonitor" daemon prio=5 tid=359 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1243452184-155" daemon prio=5 tid=155 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Parameter Sending Thread for localhost/127.0.0.1:39939" daemon prio=5 tid=1847 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) at java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) at app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-6" daemon prio=5 tid=267 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-8-2" daemon prio=10 tid=539 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp136284002-42" daemon prio=5 tid=42 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RedundancyMonitor" daemon prio=5 tid=47 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) at java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019, type=LAST_IN_PIPELINE" daemon prio=5 tid=605 in Object.wait() java.lang.Thread.State: 
WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-14-3" daemon prio=10 tid=823 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=58 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-2" daemon prio=5 tid=536 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@48077e41" daemon prio=5 tid=72 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MarkedDeleteBlockScrubberThread" daemon prio=5 tid=48 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "surefire-forkedjvm-stream-flusher" daemon prio=5 tid=16 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 2 on default port 44109" daemon prio=5 tid=193 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "master/eccb6f743e07:0:becomeActiveMaster-MemStoreChunkPool Statistics" daemon prio=5 tid=363 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Time-limited test.named-queue-events-pool-0" daemon prio=5 tid=291 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) at app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) at app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) at app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027, type=LAST_IN_PIPELINE" daemon prio=5 tid=685 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "sync.1" daemon prio=5 tid=939 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) at app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) "IPC Server handler 4 on default port 41313" daemon prio=5 tid=142 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "AsyncFSWAL-8-3" daemon prio=10 tid=541 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-1" daemon prio=5 tid=535 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data5/current/BP-1220159422-172.17.0.2-1731234707608" daemon prio=5 tid=224 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-0-hdfs://localhost:39939/hbase-prefix:default" daemon prio=5 tid=1010 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-2" daemon prio=5 tid=262 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data2)" daemon prio=5 tid=164 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "qtp485409569-87" daemon prio=5 tid=87 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$256/0x00007fa8604333d0.run(Unknown Source) at 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "sync.4" daemon prio=5 tid=942 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) at app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) "NIOWorkerThread-5" daemon prio=5 tid=266 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 1 on default port 41313" daemon prio=5 tid=139 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "IPC Server listener on 0" daemon prio=5 tid=168 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) 
"Session-HouseKeeper-e9304c0-1" prio=5 tid=91 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-3-1" daemon prio=10 tid=292 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOServerCxnFactory.SelectorThread-1" daemon prio=5 tid=240 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) "NIOWorkerThread-4" daemon prio=5 tid=264 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-4-1" prio=10 tid=126 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-3" daemon prio=5 tid=263 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091, type=LAST_IN_PIPELINE" daemon prio=5 tid=1408 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-4-1" daemon prio=10 tid=314 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1243452184-153" daemon prio=5 tid=153 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$256/0x00007fa8604333d0.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:56540 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091]" daemon prio=5 tid=1407 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) at 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "java.util.concurrent.ThreadPoolExecutor$Worker@459792a9[State = -1, empty queue]" daemon prio=5 tid=215 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.PeerCache@434a0ce7" daemon prio=5 tid=365 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) at app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) at app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-1-thread-1" daemon prio=5 tid=14 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) at java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp136284002-38" daemon prio=5 tid=38 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at 
app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$256/0x00007fa8604333d0.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@39749b4" daemon prio=5 tid=93 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@134244c9" daemon prio=5 tid=85 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) at app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "SSL Certificates Store Monitor" daemon prio=5 tid=25 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "surefire-forkedjvm-command-thread" daemon prio=5 tid=18 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) at java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) at app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) at app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) at 
app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) at app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) at app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) at app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Client (1319272049) connection to localhost/127.0.0.1:39939 from jenkins" daemon prio=5 tid=124 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) at app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) "DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:47174 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019]" daemon prio=5 tid=602 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 41313" daemon prio=5 tid=138 timed_waiting java.lang.Thread.State: TIMED_WAITING at 
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
        at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
        at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"NIOWorkerThread-15" daemon prio=5 tid=276 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"java.util.concurrent.ThreadPoolExecutor$Worker@6f5cc5da[State = -1, empty queue]" daemon prio=5 tid=232 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server handler 2 on default port 39939" daemon prio=5 tid=66 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
        at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
        at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"Command processor" daemon prio=5 tid=179 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
        at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)

"IPC Server handler 2 on default port 41313" daemon prio=5 tid=140 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
        at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
        at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"pool-36-thread-1" prio=5 tid=152 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Socket Reader #1 for port 0" daemon prio=5 tid=169 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
        at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)

"NIOWorkerThread-7" daemon prio=5 tid=268 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"LeaseRenewer:jenkins@localhost:39939" daemon prio=5 tid=248 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        at app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
        at app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
        at app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:44890 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741905_1083]" daemon prio=5 tid=1325 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
        at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
        at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
        at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
        at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
        at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
        at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"RpcClient-timer-pool-0" daemon prio=5 tid=415 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
        at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"HMaster-EventLoopGroup-2-2" daemon prio=10 tid=436 runnable
java.lang.Thread.State: RUNNABLE
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091, type=LAST_IN_PIPELINE" daemon prio=5 tid=1410 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/java.lang.Object.wait(Native Method)
        at java.base@17.0.11/java.lang.Object.wait(Object.java:338)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"NIOWorkerThread-8" daemon prio=5 tid=269 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server handler 4 on default port 44109" daemon prio=5 tid=195 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
        at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
        at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"AsyncFSWAL-26-3" daemon prio=10 tid=1428 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"MiniHBaseClusterRegionServer-EventLoopGroup-4-2" daemon prio=10 tid=1090 runnable
java.lang.Thread.State: RUNNABLE
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"NIOWorkerThread-14" daemon prio=5 tid=275 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"NIOWorkerThread-12" daemon prio=5 tid=273 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"NIOWorkerThread-11" daemon prio=5 tid=272 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"java.util.concurrent.ThreadPoolExecutor$Worker@465d55c1[State = -1, empty queue]" daemon prio=5 tid=212 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Time-limited test" daemon prio=5 tid=22 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        at app//org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.sleepIgnoreInterrupt(FanOutOneBlockAsyncDFSOutputHelper.java:669)
        at app//org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:663)
        at app//org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619)
        at app//org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52)
        at app//org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662)
        at app//org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699)
        at app//org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528)
        at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194)
        at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174)
        at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167)
        at app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761)
        at app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021)
        at app//org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153)
        at app//org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67)
        at java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
        at java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568)
        at app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
        at app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
        at app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
        at app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
        at app//org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
        at app//org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
        at java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2e68983a" daemon prio=5 tid=71 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        at app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"nioEventLoopGroup-6-1" prio=10 tid=166 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
        at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"NIOWorkerThread-1" daemon prio=5 tid=246 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-24-2" daemon prio=10 tid=1360 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-22-1" daemon prio=10 tid=1276 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"nioEventLoopGroup-2-1" prio=10 tid=92 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
        at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-0-hdfs://localhost:39939/hbase-prefix:default" daemon prio=5 tid=1263 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"pool-23-thread-1" daemon prio=5 tid=209 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-28-1" daemon prio=10 tid=1539 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"BP-1220159422-172.17.0.2-1731234707608 heartbeating to localhost/127.0.0.1:39939" daemon prio=5 tid=136 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Object.wait(Native Method)
        at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
        at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
        at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"main" prio=5 tid=1 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/java.lang.Thread.dumpThreads(Native Method)
        at java.base@17.0.11/java.lang.Thread.getAllStackTraces(Thread.java:1671)
        at app//org.apache.hadoop.hbase.TimedOutTestsListener.buildThreadDump(TimedOutTestsListener.java:92)
        at app//org.apache.hadoop.hbase.TimedOutTestsListener.buildThreadDiagnosticString(TimedOutTestsListener.java:78)
        at app//org.apache.hadoop.hbase.TimedOutTestsListener.testFailure(TimedOutTestsListener.java:65)
        at app//org.junit.runner.notification.SynchronizedRunListener.testFailure(SynchronizedRunListener.java:94)
        at app//org.junit.runner.notification.RunNotifier$6.notifyListener(RunNotifier.java:177)
        at app//org.junit.runner.notification.RunNotifier$SafeNotifier.run(RunNotifier.java:72)
        at app//org.junit.runner.notification.RunNotifier.fireTestFailures(RunNotifier.java:173)
        at app//org.junit.runner.notification.RunNotifier.fireTestFailure(RunNotifier.java:167)
        at app//org.apache.maven.surefire.common.junit4.Notifier.fireTestFailure(Notifier.java:100)
        at app//org.junit.internal.runners.model.EachTestNotifier.addFailure(EachTestNotifier.java:23)
        at app//org.junit.internal.runners.model.EachTestNotifier.addMultipleFailureException(EachTestNotifier.java:29)
        at app//org.junit.internal.runners.model.EachTestNotifier.addFailure(EachTestNotifier.java:21)
        at app//org.junit.runners.ParentRunner.run(ParentRunner.java:419)
        at app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316)
        at app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240)
        at app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214)
        at app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155)
        at app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385)
        at app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162)
        at app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507)
        at app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495)

"MiniHBaseClusterRegionServer-EventLoopGroup-3-2" daemon prio=10 tid=1081 runnable
java.lang.Thread.State: RUNNABLE
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:51734 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019]" daemon prio=5 tid=603 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
        at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
        at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
        at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
        at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
        at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
        at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027, type=LAST_IN_PIPELINE" daemon prio=5 tid=687 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/java.lang.Object.wait(Native Method)
        at java.base@17.0.11/java.lang.Object.wait(Object.java:338)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741905_1083, type=LAST_IN_PIPELINE" daemon prio=5 tid=1329 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/java.lang.Object.wait(Native Method)
        at java.base@17.0.11/java.lang.Object.wait(Object.java:338)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server idle connection scanner for port 44109" daemon prio=5 tid=170 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Object.wait(Native Method)
        at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
        at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)

"IPC Server handler 0 on default port 42563" daemon prio=5 tid=104 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
        at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
        at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data3/current/BP-1220159422-172.17.0.2-1731234707608" daemon prio=5 tid=185 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"FsDatasetAsyncDiskServiceFixer" daemon prio=5 tid=237 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        at app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576)

"AsyncFSWAL-28-2" daemon prio=10 tid=1540 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"qtp485409569-88-acceptor-0@7a2f30aa-ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:35791}" daemon prio=3 tid=88 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
        at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
        at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
        at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
        at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"RPCClient-NioEventLoopGroup-6-4" daemon prio=5 tid=1061 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"SyncThread:0" daemon prio=5 tid=243 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170)

"Hadoop-Metrics-Updater-0" daemon prio=5 tid=132 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"pool-33-thread-1" daemon prio=5 tid=228 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57026" daemon prio=5 tid=241 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205)
        at app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181)

"AsyncFSWAL-12-3" daemon prio=10 tid=702 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-10-3" daemon prio=10 tid=622 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-24-1" daemon prio=10 tid=1359 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Async-Client-Retry-Timer-pool-0" daemon prio=5 tid=414 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
        at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server handler 4 on default port 39939" daemon prio=5 tid=68 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
        at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
        at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"AsyncFSWAL-0-hdfs://localhost:39939/hbase-prefix:default" daemon prio=5 tid=1600 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data1)" daemon prio=5 tid=163 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Object.wait(Native Method)
        at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)

"refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data1/current/BP-1220159422-172.17.0.2-1731234707608" daemon prio=5 tid=189 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"qtp136284002-39" daemon prio=5 tid=39 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
        at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
        at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$256/0x00007fa8604333d0.run(Unknown Source)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-0-hdfs://localhost:39939/hbase-prefix:default" daemon prio=5 tid=688 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027, type=LAST_IN_PIPELINE" daemon prio=5 tid=686 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/java.lang.Object.wait(Native Method)
        at java.base@17.0.11/java.lang.Object.wait(Object.java:338)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Timer for 'HBase' metrics system" daemon prio=5 tid=1827 timed_waiting
java.lang.Thread.State: TIMED_WAITING
java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "qtp136284002-44" daemon prio=5 tid=44 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data6/current/BP-1220159422-172.17.0.2-1731234707608" daemon prio=5 tid=223 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=54 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:41400 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019]" daemon prio=5 tid=604 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) at 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Session-HouseKeeper-7594ef0e-1" prio=5 tid=123 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1243452184-156" daemon prio=5 tid=156 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019, type=LAST_IN_PIPELINE" daemon prio=5 tid=607 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "sync.2" daemon prio=5 tid=940 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) at app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) "DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:42524 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091]" daemon prio=5 tid=1405 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data3)" daemon prio=5 tid=162 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "RequestThrottler" daemon prio=5 tid=245 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data5)" daemon prio=5 tid=217 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "NIOWorkerThread-9" daemon prio=5 tid=270 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Notification Thread" daemon prio=9 tid=13 runnable java.lang.Thread.State: RUNNABLE "Session-HouseKeeper-3b80a458-1" prio=5 tid=45 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "DatanodeAdminMonitor-0" daemon prio=5 tid=62 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "sync.3" daemon prio=5 tid=941 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) at app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) "org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5a15ef52" daemon prio=5 tid=151 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) at app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Parameter Sending Thread for localhost/127.0.0.1:39939" daemon prio=5 tid=125 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) at java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) at app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-5-2" daemon prio=10 tid=401 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-5-3" daemon prio=10 tid=402 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-10" daemon prio=5 tid=271 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-28-3" daemon prio=10 tid=1541 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Common-Cleaner" daemon prio=8 tid=12 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) at java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@d1dc275" daemon prio=5 tid=74 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1938174733-121" daemon prio=5 tid=121 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp136284002-43" daemon prio=5 tid=43 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@6aa74238" daemon prio=5 tid=73 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "regionserver/eccb6f743e07:0.procedureResultReporter" daemon prio=5 tid=486 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) "DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:51784 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027]" daemon prio=5 tid=682 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 42563" daemon prio=5 tid=96 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "Idle-Rpc-Conn-Sweeper-pool-0" daemon prio=5 tid=416 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "weak-ref-cleaner-strictcontextstorage" daemon prio=1 tid=257 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 4 on default port 42563" daemon prio=5 tid=108 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "HBase-Metrics2-1" daemon prio=5 tid=258 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "HMaster-EventLoopGroup-2-3" daemon prio=10 tid=437 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "BP-1220159422-172.17.0.2-1731234707608 heartbeating to localhost/127.0.0.1:39939" daemon prio=5 tid=183 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-18-thread-1" prio=5 tid=86 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=128 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "qtp485409569-89" daemon prio=5 tid=89 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-3" daemon prio=5 tid=537 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-14-2" daemon prio=10 tid=822 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=94 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "qtp1938174733-119" daemon prio=5 tid=119 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$256/0x00007fa8604333d0.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "HMaster-EventLoopGroup-2-1" daemon prio=10 tid=259 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 3 on default port 41313" daemon prio=5 tid=141 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "NIOServerCxnFactory.SelectorThread-0" daemon prio=5 tid=239 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) "SessionTracker" daemon prio=5 tid=242 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) "AsyncFSWAL-26-1" daemon prio=10 tid=1425 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741905_1083, type=LAST_IN_PIPELINE" daemon prio=5 tid=1328 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091, type=LAST_IN_PIPELINE" daemon prio=5 tid=1409 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-0-hdfs://localhost:39939/hbase-prefix:default" daemon prio=5 tid=1411 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-12-thread-1" prio=5 tid=69 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@64b1aed7" daemon prio=5 tid=34 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@60b67a0f" daemon prio=5 tid=167 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-17-1" daemon prio=10 tid=981 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Client (1319272049) connection to localhost/127.0.0.1:39939 from jenkins" daemon prio=5 tid=1846 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) at app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) "IPC Server Responder" daemon prio=5 tid=57 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "BP-1220159422-172.17.0.2-1731234707608 heartbeating to localhost/127.0.0.1:39939" daemon prio=5 tid=102 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-5-1" daemon prio=10 tid=336 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-1" daemon prio=5 tid=533 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@16489287" daemon prio=5 tid=49 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-17-3" daemon prio=10 tid=984 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Reference Handler" daemon prio=10 tid=2 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method)
        at java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253)
        at java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215)

"IPC Server handler 3 on default port 39939" daemon prio=5 tid=67 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
        at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
        at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"AsyncFSWAL-10-2" daemon prio=10 tid=620 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"RPCClient-NioEventLoopGroup-6-8" daemon prio=5 tid=1168 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"MiniHBaseClusterRegionServer-EventLoopGroup-4-3" daemon prio=10 tid=1118 runnable
java.lang.Thread.State: RUNNABLE
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
        at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"pool-6-thread-1" prio=5 tid=36 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"SnapshotHandlerChoreCleaner" daemon prio=5 tid=427 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-0-hdfs://localhost:39939/hbase-prefix:default" daemon prio=5 tid=608 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-22-2" daemon prio=10 tid=1277 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"pool-1-thread-2" daemon prio=5 tid=15 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
        at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275)
        at java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"qtp1243452184-154-acceptor-0@60f5bbe3-ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:35917}" daemon prio=3 tid=154 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
        at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
        at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
        at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
        at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:47230 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741851_1027]" daemon prio=5 tid=683 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
        at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
        at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
        at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
        at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
        at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
        at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-14-1" daemon prio=10 tid=821 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-20-2" daemon prio=10 tid=1185 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Hadoop-Metrics-Updater-0" daemon prio=5 tid=98 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:53406 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741905_1083]" daemon prio=5 tid=1324 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
        at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
        at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
        at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
        at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
        at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
        at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Abort regionserver monitor" daemon prio=5 tid=1169 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Object.wait(Native Method)
        at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
        at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)

"regionserver/eccb6f743e07:0.procedureResultReporter" daemon prio=5 tid=484 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)

"PacketResponder: BP-1220159422-172.17.0.2-1731234707608:blk_1073741843_1019, type=LAST_IN_PIPELINE" daemon prio=5 tid=606 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/java.lang.Object.wait(Native Method)
        at java.base@17.0.11/java.lang.Object.wait(Object.java:338)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server handler 1 on default port 39939" daemon prio=5 tid=65 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
        at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
        at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner" daemon prio=5 tid=23 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/java.lang.Object.wait(Native Method)
        at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
        at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
        at app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server Responder" daemon prio=5 tid=171 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
        at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)

"NIOWorkerThread-13" daemon prio=5 tid=274 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-24-3" daemon prio=10 tid=1362 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"NIOWorkerThread-16" daemon prio=5 tid=277 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server handler 3 on default port 44109" daemon prio=5 tid=194 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
        at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
        at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:42696 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741913_1091]" daemon prio=5 tid=1406 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
        at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
        at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
        at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
        at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
        at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
        at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Command processor" daemon prio=5 tid=135 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
        at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)

"qtp136284002-41-acceptor-0@48656621-ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:43815}" daemon prio=3 tid=41 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
        at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
        at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
        at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
        at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-22-3" daemon prio=10 tid=1279 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"qtp1938174733-120-acceptor-0@5a27262-ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:32819}" daemon prio=3 tid=120 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
        at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
        at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
        at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
        at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@36a36667" daemon prio=5 tid=117 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
        at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
        at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
        at java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
        at app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Hadoop-Metrics-Updater-0" daemon prio=5 tid=172 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"qtp1938174733-122" daemon prio=5 tid=122 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"TestAsyncWALReplay-pool-0" daemon prio=5 tid=601 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"AsyncFSWAL-8-1" daemon prio=10 tid=538 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"RPCClient-NioEventLoopGroup-6-6" daemon prio=5 tid=1117 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"DataXceiver for client DFSClient_NONMAPREDUCE_-1215601723_22 at /127.0.0.1:48480 [Receiving block BP-1220159422-172.17.0.2-1731234707608:blk_1073741905_1083]" daemon prio=5 tid=1326 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335)
        at app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161)
        at app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131)
        at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
        at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
        at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
        at java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151)
        at app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553)
        at app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176)
        at app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110)
        at app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"qtp136284002-37" daemon prio=5 tid=37 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
        at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
        at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$256/0x00007fa8604333d0.run(Unknown Source)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"sync.0" daemon prio=5 tid=938 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426)
        at app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441)

"AsyncFSWAL-20-1" daemon prio=10 tid=1184 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
        at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
        at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
        at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100" daemon prio=5 tid=35 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        at app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)

"refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/3d7426e2-42ef-63d4-5c53-b760f69e9b80/cluster_22ba0d42-4bc4-6a87-e00f-9ac91fb0aa48/data/data4/current/BP-1220159422-172.17.0.2-1731234707608" daemon prio=5 tid=190 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
        at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"pool-44-thread-1" prio=5 tid=188 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"pool-7-thread-1" prio=5 tid=46 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
        at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
        at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
        at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"regionserver/eccb6f743e07:0.procedureResultReporter" daemon prio=5 tid=485 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
        at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
        at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)

"IPC Server handler 1 on default port 42563" daemon prio=5 tid=105 timed_waiting
java.lang.Thread.State: TIMED_WAITING
        at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
        at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
        at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
        at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
        at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)

"IPC Server Responder" daemon prio=5 tid=97 runnable
java.lang.Thread.State: RUNNABLE
        at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
        at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
        at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)