2024-11-11 16:25:43,143 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a
2024-11-11 16:25:43,158 main DEBUG Took 0.012202 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-11 16:25:43,158 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-11 16:25:43,159 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-11 16:25:43,160 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-11 16:25:43,163 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,173 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-11 16:25:43,188 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,190 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,190 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,191 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,192 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,192 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,193 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,194 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,194 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,195 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,196 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,196 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,197 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,197 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,198 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,198 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,199 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,199 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,200 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,201 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,202 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,202 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,203 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,203 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:25:43,204 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,204 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-11 16:25:43,206 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:25:43,207 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-11 16:25:43,209 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-11 16:25:43,210 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-11 16:25:43,211 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-11 16:25:43,212 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-11 16:25:43,225 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-11 16:25:43,228 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-11 16:25:43,230 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-11 16:25:43,231 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-11 16:25:43,231 main DEBUG createAppenders(={Console})
2024-11-11 16:25:43,232 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a initialized
2024-11-11 16:25:43,233 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a
2024-11-11 16:25:43,233 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@20b2475a OK.
2024-11-11 16:25:43,234 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-11 16:25:43,234 main DEBUG OutputStream closed
2024-11-11 16:25:43,234 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-11 16:25:43,235 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-11 16:25:43,235 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@4310d43 OK
2024-11-11 16:25:43,340 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-11 16:25:43,343 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-11 16:25:43,344 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-11 16:25:43,345 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-11 16:25:43,346 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-11 16:25:43,347 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-11 16:25:43,347 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-11 16:25:43,347 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-11 16:25:43,348 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-11 16:25:43,348 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-11 16:25:43,349 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-11 16:25:43,349 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-11 16:25:43,350 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-11 16:25:43,350 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-11 16:25:43,350 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-11 16:25:43,351 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-11 16:25:43,351 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-11 16:25:43,351 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-11 16:25:43,354 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-11 16:25:43,354 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@12f9af83) with optional ClassLoader: null
2024-11-11 16:25:43,355 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-11 16:25:43,356 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@12f9af83] started OK.
2024-11-11T16:25:43,784 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273
2024-11-11 16:25:43,790 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-11 16:25:43,790 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-11T16:25:43,802 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay timeout: 13 mins
2024-11-11T16:25:43,809 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplayValueCompression timeout: 13 mins
2024-11-11T16:25:43,840 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-11T16:25:43,931 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false
2024-11-11T16:25:43,931 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512
2024-11-11T16:25:43,948 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-11T16:25:43,966 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b, deleteOnExit=true
2024-11-11T16:25:43,966 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-11T16:25:43,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/test.cache.data in system properties and HBase conf
2024-11-11T16:25:43,967 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/hadoop.tmp.dir in system properties and HBase conf
2024-11-11T16:25:43,968 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/hadoop.log.dir in system properties and HBase conf
2024-11-11T16:25:43,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-11T16:25:43,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-11T16:25:43,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-11T16:25:44,103 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-11T16:25:44,250 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-11T16:25:44,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-11T16:25:44,257 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-11T16:25:44,258 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-11T16:25:44,258 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-11T16:25:44,259 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-11T16:25:44,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-11T16:25:44,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-11T16:25:44,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-11T16:25:44,261 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-11T16:25:44,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/nfs.dump.dir in system properties and HBase conf
2024-11-11T16:25:44,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/java.io.tmpdir in system properties and HBase conf
2024-11-11T16:25:44,263 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-11T16:25:44,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-11T16:25:44,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-11T16:25:45,360 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-11T16:25:45,443 INFO [Time-limited test {}] log.Log(170): Logging initialized @3528ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-11T16:25:45,536 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T16:25:45,630 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T16:25:45,650 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T16:25:45,650 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T16:25:45,652 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-11T16:25:45,674 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T16:25:45,678 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/hadoop.log.dir/,AVAILABLE}
2024-11-11T16:25:45,681 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T16:25:45,961 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3717288f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/java.io.tmpdir/jetty-localhost-38255-hadoop-hdfs-3_4_1-tests_jar-_-any-1190308671569354881/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T16:25:45,979 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:38255}
2024-11-11T16:25:45,980 INFO [Time-limited test {}] server.Server(415): Started @4065ms
2024-11-11T16:25:46,508 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T16:25:46,524 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T16:25:46,529 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T16:25:46,529 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T16:25:46,530 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-11T16:25:46,533 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cf5a85e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/hadoop.log.dir/,AVAILABLE}
2024-11-11T16:25:46,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a359997{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T16:25:46,685 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@330740de{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/java.io.tmpdir/jetty-localhost-35255-hadoop-hdfs-3_4_1-tests_jar-_-any-11900404693627667276/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T16:25:46,686 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:35255}
2024-11-11T16:25:46,686 INFO [Time-limited test {}] server.Server(415): Started @4772ms
2024-11-11T16:25:46,746 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-11T16:25:46,950 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T16:25:46,959 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T16:25:46,977 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T16:25:46,977 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T16:25:46,978 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-11T16:25:46,981 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46b092e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/hadoop.log.dir/,AVAILABLE}
2024-11-11T16:25:46,982 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cc2d6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T16:25:47,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7bd427b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/java.io.tmpdir/jetty-localhost-41265-hadoop-hdfs-3_4_1-tests_jar-_-any-10723762398878768813/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T16:25:47,151 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:41265}
2024-11-11T16:25:47,152 INFO [Time-limited test {}] server.Server(415): Started @5237ms
2024-11-11T16:25:47,155 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-11T16:25:47,204 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T16:25:47,211 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T16:25:47,213 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T16:25:47,213 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T16:25:47,214 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-11T16:25:47,217 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a6d5e13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/hadoop.log.dir/,AVAILABLE}
2024-11-11T16:25:47,218 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@f9972d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T16:25:47,295 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data1/current/BP-1916425677-172.17.0.2-1731342345074/current, will proceed with Du for space computation calculation,
2024-11-11T16:25:47,295 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data3/current/BP-1916425677-172.17.0.2-1731342345074/current, will proceed with Du for space computation calculation,
2024-11-11T16:25:47,296 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data4/current/BP-1916425677-172.17.0.2-1731342345074/current, will proceed with Du for space computation calculation,
2024-11-11T16:25:47,297 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data2/current/BP-1916425677-172.17.0.2-1731342345074/current, will proceed with Du for space computation calculation,
2024-11-11T16:25:47,364 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35f1150e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/java.io.tmpdir/jetty-localhost-37457-hadoop-hdfs-3_4_1-tests_jar-_-any-14074441520888089431/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T16:25:47,365 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:37457}
2024-11-11T16:25:47,365 INFO [Time-limited test {}] server.Server(415): Started @5451ms
2024-11-11T16:25:47,368 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-11T16:25:47,379 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-11T16:25:47,379 WARN [Thread-83 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-11T16:25:47,479 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1eb91bb1cfaf8703 with lease ID 0x8191fabb35cad1e6: Processing first storage report for DS-e86d92e3-e756-4efa-8415-33ee44fedfc2 from datanode DatanodeRegistration(127.0.0.1:32929, datanodeUuid=cee060b1-9d20-4042-8c32-8327c5728766, infoPort=40387, infoSecurePort=0, ipcPort=44235, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074)
2024-11-11T16:25:47,480 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1eb91bb1cfaf8703 with lease ID 0x8191fabb35cad1e6: from storage DS-e86d92e3-e756-4efa-8415-33ee44fedfc2 node DatanodeRegistration(127.0.0.1:32929, datanodeUuid=cee060b1-9d20-4042-8c32-8327c5728766, infoPort=40387, infoSecurePort=0, ipcPort=44235, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-11T16:25:47,481 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdf5ad8eca7d61862 with lease ID 0x8191fabb35cad1e7: Processing first storage report for DS-6aee253a-12c8-459c-998e-494c3b2755a0 from datanode DatanodeRegistration(127.0.0.1:40903, datanodeUuid=b2549184-d599-471d-b3b3-6438de864e70, infoPort=34969, infoSecurePort=0, ipcPort=40417, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074)
2024-11-11T16:25:47,481 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf5ad8eca7d61862 with lease ID 0x8191fabb35cad1e7: from storage DS-6aee253a-12c8-459c-998e-494c3b2755a0 node DatanodeRegistration(127.0.0.1:40903, datanodeUuid=b2549184-d599-471d-b3b3-6438de864e70, infoPort=34969, infoSecurePort=0, ipcPort=40417, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-11T16:25:47,481 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1eb91bb1cfaf8703 with lease ID 0x8191fabb35cad1e6: Processing first storage report for DS-02c1fabb-a856-4099-87fb-374f6ed04962 from datanode DatanodeRegistration(127.0.0.1:32929, datanodeUuid=cee060b1-9d20-4042-8c32-8327c5728766, infoPort=40387, infoSecurePort=0, ipcPort=44235, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074)
2024-11-11T16:25:47,482 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1eb91bb1cfaf8703 with lease ID 0x8191fabb35cad1e6: from storage DS-02c1fabb-a856-4099-87fb-374f6ed04962 node DatanodeRegistration(127.0.0.1:32929, datanodeUuid=cee060b1-9d20-4042-8c32-8327c5728766, infoPort=40387, infoSecurePort=0, ipcPort=44235, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-11T16:25:47,482 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdf5ad8eca7d61862 with lease ID 0x8191fabb35cad1e7: Processing first storage report for DS-d60db9fc-ddff-4517-a292-4928465f6232 from datanode DatanodeRegistration(127.0.0.1:40903, datanodeUuid=b2549184-d599-471d-b3b3-6438de864e70, infoPort=34969, infoSecurePort=0, ipcPort=40417, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074)
2024-11-11T16:25:47,482 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdf5ad8eca7d61862 with lease ID 0x8191fabb35cad1e7: from storage DS-d60db9fc-ddff-4517-a292-4928465f6232 node DatanodeRegistration(127.0.0.1:40903, datanodeUuid=b2549184-d599-471d-b3b3-6438de864e70, infoPort=34969, infoSecurePort=0, ipcPort=40417, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-11T16:25:47,542 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data5/current/BP-1916425677-172.17.0.2-1731342345074/current, will proceed with Du for space computation calculation,
2024-11-11T16:25:47,543 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data6/current/BP-1916425677-172.17.0.2-1731342345074/current, will proceed with Du for space computation calculation,
2024-11-11T16:25:47,576 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-11T16:25:47,584 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbea15eab0801e606 with lease ID 0x8191fabb35cad1e8: Processing first storage report for DS-11a8ce1d-a6ec-4582-95e1-dd214088af88 from datanode DatanodeRegistration(127.0.0.1:41813, datanodeUuid=70355d25-5036-47bb-bb7d-eed18d16b805, infoPort=33071, infoSecurePort=0, ipcPort=42069, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074)
2024-11-11T16:25:47,585 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbea15eab0801e606 with lease ID 0x8191fabb35cad1e8: from storage DS-11a8ce1d-a6ec-4582-95e1-dd214088af88 node DatanodeRegistration(127.0.0.1:41813, datanodeUuid=70355d25-5036-47bb-bb7d-eed18d16b805, infoPort=33071, infoSecurePort=0, ipcPort=42069, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-11T16:25:47,585 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbea15eab0801e606 with lease ID 0x8191fabb35cad1e8: Processing first storage report for DS-d4c87077-b6c6-4fa8-8d3f-72a235a5f234 from datanode DatanodeRegistration(127.0.0.1:41813, datanodeUuid=70355d25-5036-47bb-bb7d-eed18d16b805, infoPort=33071, infoSecurePort=0, ipcPort=42069, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074)
2024-11-11T16:25:47,586 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbea15eab0801e606 with lease ID 0x8191fabb35cad1e8: from storage DS-d4c87077-b6c6-4fa8-8d3f-72a235a5f234 node DatanodeRegistration(127.0.0.1:41813, datanodeUuid=70355d25-5036-47bb-bb7d-eed18d16b805, infoPort=33071, infoSecurePort=0, ipcPort=42069, storageInfo=lv=-57;cid=testClusterID;nsid=1152938014;c=1731342345074), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-11T16:25:47,873 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273
2024-11-11T16:25:47,978 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/zookeeper_0, clientPort=59036, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-11T16:25:47,990 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59036
2024-11-11T16:25:48,027 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:25:48,031 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:25:48,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741825_1001 (size=7)
2024-11-11T16:25:48,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741825_1001 (size=7)
2024-11-11T16:25:48,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741825_1001 (size=7)
2024-11-11T16:25:48,694 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553 with version=8
2024-11-11T16:25:48,694 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/hbase-staging
2024-11-11T16:25:49,093 INFO [Time-limited test {}] client.ConnectionUtils(128): master/16b413a53992:0 server-side Connection retries=45
2024-11-11T16:25:49,106 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:49,107 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:49,114 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-11T16:25:49,114 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:49,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-11T16:25:49,321 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-11T16:25:49,404 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-11T16:25:49,420 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-11T16:25:49,425 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-11T16:25:49,461 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 4639 (auto-detected)
2024-11-11T16:25:49,463 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-11T16:25:49,492 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40215
2024-11-11T16:25:49,520 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40215 connecting to ZooKeeper ensemble=127.0.0.1:59036
2024-11-11T16:25:49,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:402150x0, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-11T16:25:49,595 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40215-0x1002fa9b94b0000 connected
2024-11-11T16:25:49,704 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:25:49,710 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:25:49,728 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-11T16:25:49,734 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553, hbase.cluster.distributed=false
2024-11-11T16:25:49,769 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-11T16:25:49,777 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40215
2024-11-11T16:25:49,783 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40215
2024-11-11T16:25:49,786 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40215
2024-11-11T16:25:49,788 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40215
2024-11-11T16:25:49,789 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40215
2024-11-11T16:25:49,946 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b413a53992:0 server-side Connection retries=45
2024-11-11T16:25:49,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:49,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:49,949 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-11T16:25:49,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:49,949 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-11T16:25:49,953 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-11T16:25:49,956 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-11T16:25:49,957 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43519
2024-11-11T16:25:49,959 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43519 connecting to ZooKeeper ensemble=127.0.0.1:59036
2024-11-11T16:25:49,960 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:25:49,963 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:25:49,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:435190x0, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-11T16:25:49,975 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43519-0x1002fa9b94b0001 connected
2024-11-11T16:25:49,977 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-11T16:25:49,982 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-11T16:25:49,994 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-11T16:25:49,997 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-11T16:25:50,010 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-11T16:25:50,013 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43519
2024-11-11T16:25:50,014 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43519
2024-11-11T16:25:50,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43519
2024-11-11T16:25:50,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43519
2024-11-11T16:25:50,018 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43519
2024-11-11T16:25:50,047 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b413a53992:0 server-side Connection retries=45
2024-11-11T16:25:50,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:50,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:50,048 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-11T16:25:50,049 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:50,049 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-11T16:25:50,049 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-11T16:25:50,050 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-11T16:25:50,053 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42465
2024-11-11T16:25:50,055 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42465 connecting to ZooKeeper ensemble=127.0.0.1:59036
2024-11-11T16:25:50,057 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:25:50,061 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:25:50,084 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:424650x0, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-11T16:25:50,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:424650x0, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-11T16:25:50,088 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42465-0x1002fa9b94b0002 connected
2024-11-11T16:25:50,085 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-11T16:25:50,097 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-11T16:25:50,099 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-11T16:25:50,101 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-11T16:25:50,103 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42465
2024-11-11T16:25:50,103 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42465
2024-11-11T16:25:50,104 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42465
2024-11-11T16:25:50,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42465
2024-11-11T16:25:50,107 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42465
2024-11-11T16:25:50,127 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b413a53992:0 server-side Connection retries=45
2024-11-11T16:25:50,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:50,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:50,127 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-11T16:25:50,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:25:50,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-11T16:25:50,128 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-11T16:25:50,129 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-11T16:25:50,133 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43811
2024-11-11T16:25:50,136 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43811 connecting to ZooKeeper ensemble=127.0.0.1:59036
2024-11-11T16:25:50,137 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:25:50,140 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:25:50,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:438110x0, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-11T16:25:50,146 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:438110x0, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-11T16:25:50,147 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-11T16:25:50,147 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43811-0x1002fa9b94b0003 connected
2024-11-11T16:25:50,148 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-11T16:25:50,149 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-11T16:25:50,151 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-11T16:25:50,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43811
2024-11-11T16:25:50,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43811
2024-11-11T16:25:50,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43811
2024-11-11T16:25:50,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43811
2024-11-11T16:25:50,154 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43811
2024-11-11T16:25:50,176 DEBUG [M:0;16b413a53992:40215 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;16b413a53992:40215
2024-11-11T16:25:50,181 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/16b413a53992,40215,1731342348830
2024-11-11T16:25:50,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-11T16:25:50,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-11T16:25:50,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-11T16:25:50,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-11T16:25:50,192 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/16b413a53992,40215,1731342348830
2024-11-11T16:25:50,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-11T16:25:50,221 DEBUG [Time-limited test-EventThread
{}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T16:25:50,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:50,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:50,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T16:25:50,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:50,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:50,223 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T16:25:50,225 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/16b413a53992,40215,1731342348830 from backup master directory 2024-11-11T16:25:50,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/16b413a53992,40215,1731342348830 2024-11-11T16:25:50,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:25:50,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:25:50,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:25:50,229 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:25:50,229 WARN [master/16b413a53992:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T16:25:50,230 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=16b413a53992,40215,1731342348830 2024-11-11T16:25:50,232 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-11T16:25:50,233 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-11T16:25:50,313 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/hbase.id] with ID: b4307b4d-0d23-478f-a331-4653ccc7b610 2024-11-11T16:25:50,313 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/.tmp/hbase.id 2024-11-11T16:25:50,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741826_1002 (size=42) 2024-11-11T16:25:50,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741826_1002 (size=42) 2024-11-11T16:25:50,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741826_1002 (size=42) 2024-11-11T16:25:50,346 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/.tmp/hbase.id]:[hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/hbase.id] 2024-11-11T16:25:50,426 INFO [master/16b413a53992:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:25:50,431 INFO [master/16b413a53992:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T16:25:50,455 INFO [master/16b413a53992:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 23ms. 
2024-11-11T16:25:50,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:50,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:50,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:50,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:50,472 WARN [IPC Server handler 0 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:50,473 WARN [IPC Server handler 0 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:50,473 WARN [IPC Server handler 0 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:50,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741827_1003 (size=196) 2024-11-11T16:25:50,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741827_1003 (size=196) 2024-11-11T16:25:50,505 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER 
=> 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T16:25:50,508 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T16:25:50,516 INFO [master/16b413a53992:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:25:50,559 WARN [IPC Server handler 4 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:50,559 WARN [IPC Server handler 4 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:50,559 WARN [IPC Server handler 4 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:50,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741828_1004 (size=1189) 2024-11-11T16:25:50,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741828_1004 (size=1189) 2024-11-11T16:25:50,593 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', 
BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store 2024-11-11T16:25:50,643 WARN [IPC Server handler 3 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:50,643 WARN [IPC Server handler 3 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:50,643 WARN [IPC Server handler 3 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:50,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741829_1005 (size=34) 2024-11-11T16:25:50,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741829_1005 (size=34) 2024-11-11T16:25:50,663 INFO [master/16b413a53992:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-11T16:25:50,667 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:25:50,669 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T16:25:50,669 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:25:50,669 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:25:50,671 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T16:25:50,671 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:25:50,672 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:25:50,673 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731342350669Disabling compacts and flushes for region at 1731342350669Disabling writes for close at 1731342350671 (+2 ms)Writing region close event to WAL at 1731342350672 (+1 ms)Closed at 1731342350672 2024-11-11T16:25:50,676 WARN [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/.initializing 2024-11-11T16:25:50,676 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/WALs/16b413a53992,40215,1731342348830 2024-11-11T16:25:50,688 INFO [master/16b413a53992:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:25:50,706 INFO [master/16b413a53992:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C40215%2C1731342348830, suffix=, logDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/WALs/16b413a53992,40215,1731342348830, archiveDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/oldWALs, maxLogs=10 2024-11-11T16:25:50,745 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/WALs/16b413a53992,40215,1731342348830/16b413a53992%2C40215%2C1731342348830.1731342350711, exclude list is [], retry=0 2024-11-11T16:25:50,749 WARN [IPC Server handler 2 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], 
replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:50,749 WARN [IPC Server handler 2 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:50,749 WARN [IPC Server handler 2 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:50,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:25:50,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:50,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:50,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-11T16:25:50,833 INFO [master/16b413a53992:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/WALs/16b413a53992,40215,1731342348830/16b413a53992%2C40215%2C1731342348830.1731342350711 2024-11-11T16:25:50,837 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:25:50,838 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:25:50,839 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:25:50,844 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:50,845 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:50,903 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:50,941 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T16:25:50,946 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:50,951 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:25:50,952 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:50,956 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): 
size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T16:25:50,956 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:50,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:50,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:50,961 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T16:25:50,962 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:50,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:50,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:50,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T16:25:50,973 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:50,974 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:50,977 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:50,981 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:50,985 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:50,992 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:50,992 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:50,996 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-11T16:25:51,004 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:25:51,011 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:25:51,012 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67785275, jitterRate=0.010079309344291687}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T16:25:51,021 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731342350865Initializing all the Stores at 1731342350868 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342350869 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342350870 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342350870Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342350870Cleaning up temporary data from old regions at 1731342350992 (+122 ms)Region opened successfully at 1731342351021 (+29 ms) 2024-11-11T16:25:51,023 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T16:25:51,081 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@254b97e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0 2024-11-11T16:25:51,128 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-11T16:25:51,148 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T16:25:51,148 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T16:25:51,153 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T16:25:51,155 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 2 msec 2024-11-11T16:25:51,162 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 7 msec 2024-11-11T16:25:51,163 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T16:25:51,207 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T16:25:51,220 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T16:25:51,223 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T16:25:51,226 INFO [master/16b413a53992:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T16:25:51,229 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T16:25:51,231 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T16:25:51,234 INFO [master/16b413a53992:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T16:25:51,239 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T16:25:51,242 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T16:25:51,244 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T16:25:51,245 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T16:25:51,268 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T16:25:51,271 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T16:25:51,276 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T16:25:51,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:51,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T16:25:51,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:51,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T16:25:51,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:51,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T16:25:51,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:51,282 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=16b413a53992,40215,1731342348830, sessionid=0x1002fa9b94b0000, setting cluster-up flag (Was=false) 2024-11-11T16:25:51,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:51,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:51,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:51,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-11T16:25:51,307 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T16:25:51,314 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=16b413a53992,40215,1731342348830 2024-11-11T16:25:51,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:51,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:51,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:51,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:51,332 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T16:25:51,334 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=16b413a53992,40215,1731342348830 2024-11-11T16:25:51,351 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T16:25:51,359 INFO [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(746): ClusterId : b4307b4d-0d23-478f-a331-4653ccc7b610 2024-11-11T16:25:51,361 INFO [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(746): ClusterId : b4307b4d-0d23-478f-a331-4653ccc7b610 2024-11-11T16:25:51,363 DEBUG [RS:0;16b413a53992:43519 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T16:25:51,363 DEBUG [RS:1;16b413a53992:42465 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T16:25:51,367 INFO [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(746): ClusterId : b4307b4d-0d23-478f-a331-4653ccc7b610 2024-11-11T16:25:51,367 DEBUG [RS:2;16b413a53992:43811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T16:25:51,373 DEBUG [RS:1;16b413a53992:42465 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T16:25:51,373 DEBUG [RS:1;16b413a53992:42465 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T16:25:51,374 DEBUG [RS:0;16b413a53992:43519 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T16:25:51,374 DEBUG [RS:2;16b413a53992:43811 {}] 
procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T16:25:51,374 DEBUG [RS:0;16b413a53992:43519 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T16:25:51,374 DEBUG [RS:2;16b413a53992:43811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T16:25:51,379 DEBUG [RS:0;16b413a53992:43519 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T16:25:51,379 DEBUG [RS:2;16b413a53992:43811 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T16:25:51,380 DEBUG [RS:0;16b413a53992:43519 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44b9aea7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0 2024-11-11T16:25:51,380 DEBUG [RS:2;16b413a53992:43811 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c7ab7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0 2024-11-11T16:25:51,382 DEBUG [RS:1;16b413a53992:42465 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T16:25:51,382 DEBUG [RS:1;16b413a53992:42465 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2273162a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0 2024-11-11T16:25:51,404 DEBUG [RS:2;16b413a53992:43811 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;16b413a53992:43811 2024-11-11T16:25:51,408 INFO [RS:2;16b413a53992:43811 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T16:25:51,409 INFO [RS:2;16b413a53992:43811 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T16:25:51,409 DEBUG [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T16:25:51,409 DEBUG [RS:0;16b413a53992:43519 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;16b413a53992:43519 2024-11-11T16:25:51,409 DEBUG [RS:1;16b413a53992:42465 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;16b413a53992:42465 2024-11-11T16:25:51,409 INFO [RS:0;16b413a53992:43519 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T16:25:51,409 INFO [RS:0;16b413a53992:43519 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T16:25:51,410 DEBUG [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T16:25:51,410 INFO [RS:1;16b413a53992:42465 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T16:25:51,410 INFO [RS:1;16b413a53992:42465 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T16:25:51,410 DEBUG [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T16:25:51,413 INFO [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,40215,1731342348830 with port=42465, startcode=1731342350046 2024-11-11T16:25:51,413 INFO [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,40215,1731342348830 with port=43811, startcode=1731342350126 2024-11-11T16:25:51,414 INFO [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,40215,1731342348830 with port=43519, startcode=1731342349897 2024-11-11T16:25:51,430 DEBUG [RS:0;16b413a53992:43519 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T16:25:51,430 DEBUG [RS:2;16b413a53992:43811 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T16:25:51,430 DEBUG [RS:1;16b413a53992:42465 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T16:25:51,475 INFO [AsyncFSWAL-0-hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData-prefix:16b413a53992,40215,1731342348830 {}] compress.Compression(560): Loaded codec org.apache.hadoop.hbase.io.compress.ReusableStreamGzipCodec for compression algorithm GZ 2024-11-11T16:25:51,500 INFO [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34473, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T16:25:51,500 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55609, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T16:25:51,500 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T16:25:51,502 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46697, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T16:25:51,512 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40215 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-11T16:25:51,529 INFO [master/16b413a53992:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T16:25:51,539 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40215 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-11T16:25:51,542 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40215 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-11T16:25:51,552 INFO [master/16b413a53992:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-11T16:25:51,559 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 16b413a53992,40215,1731342348830 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T16:25:51,569 DEBUG [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-11T16:25:51,569 WARN [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-11T16:25:51,569 DEBUG [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-11T16:25:51,569 DEBUG [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-11T16:25:51,569 WARN [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-11T16:25:51,569 WARN [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-11T16:25:51,569 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5 2024-11-11T16:25:51,570 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5 2024-11-11T16:25:51,570 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5 2024-11-11T16:25:51,570 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5 2024-11-11T16:25:51,570 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/16b413a53992:0, corePoolSize=10, maxPoolSize=10 2024-11-11T16:25:51,570 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,571 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/16b413a53992:0, corePoolSize=2, maxPoolSize=2 2024-11-11T16:25:51,571 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,577 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731342381577 2024-11-11T16:25:51,579 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T16:25:51,579 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 
2024-11-11T16:25:51,580 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T16:25:51,580 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T16:25:51,584 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T16:25:51,585 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T16:25:51,585 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T16:25:51,585 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T16:25:51,587 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:51,588 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T16:25:51,587 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-11T16:25:51,597 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T16:25:51,598 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T16:25:51,598 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T16:25:51,605 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T16:25:51,605 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T16:25:51,609 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.large.0-1731342351606,5,FailOnTimeoutGroup] 2024-11-11T16:25:51,611 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.small.0-1731342351609,5,FailOnTimeoutGroup] 2024-11-11T16:25:51,612 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,612 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T16:25:51,614 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,614 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-11T16:25:51,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741831_1007 (size=1321) 2024-11-11T16:25:51,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741831_1007 (size=1321) 2024-11-11T16:25:51,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741831_1007 (size=1321) 2024-11-11T16:25:51,671 INFO [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,40215,1731342348830 with port=43519, startcode=1731342349897 2024-11-11T16:25:51,674 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40215 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b413a53992,43519,1731342349897 2024-11-11T16:25:51,674 INFO [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,40215,1731342348830 with port=43811, startcode=1731342350126 2024-11-11T16:25:51,675 INFO [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,40215,1731342348830 with port=42465, startcode=1731342350046 2024-11-11T16:25:51,677 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40215 {}] master.ServerManager(517): Registering regionserver=16b413a53992,43519,1731342349897 2024-11-11T16:25:51,692 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40215 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b413a53992,42465,1731342350046 2024-11-11T16:25:51,693 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40215 {}] master.ServerManager(517): Registering regionserver=16b413a53992,42465,1731342350046 2024-11-11T16:25:51,693 DEBUG [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553 2024-11-11T16:25:51,693 DEBUG [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39605 2024-11-11T16:25:51,693 DEBUG [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T16:25:51,701 DEBUG [RS:0;16b413a53992:43519 {}] zookeeper.ZKUtil(111): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b413a53992,43519,1731342349897 2024-11-11T16:25:51,701 WARN [RS:0;16b413a53992:43519 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T16:25:51,701 INFO [RS:0;16b413a53992:43519 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:25:51,701 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40215 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b413a53992,43811,1731342350126 2024-11-11T16:25:51,701 DEBUG [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43519,1731342349897 2024-11-11T16:25:51,702 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40215 {}] master.ServerManager(517): Registering regionserver=16b413a53992,43811,1731342350126 2024-11-11T16:25:51,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T16:25:51,703 DEBUG [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553 2024-11-11T16:25:51,703 DEBUG [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39605 2024-11-11T16:25:51,703 DEBUG [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T16:25:51,721 DEBUG [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553 2024-11-11T16:25:51,722 DEBUG [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39605 2024-11-11T16:25:51,722 DEBUG [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T16:25:51,725 DEBUG [RS:1;16b413a53992:42465 {}] zookeeper.ZKUtil(111): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b413a53992,42465,1731342350046 2024-11-11T16:25:51,725 WARN [RS:1;16b413a53992:42465 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T16:25:51,725 INFO [RS:1;16b413a53992:42465 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:25:51,726 DEBUG [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,42465,1731342350046 2024-11-11T16:25:51,731 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b413a53992,43519,1731342349897] 2024-11-11T16:25:51,731 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b413a53992,43811,1731342350126] 2024-11-11T16:25:51,731 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b413a53992,42465,1731342350046] 2024-11-11T16:25:51,736 DEBUG [RS:2;16b413a53992:43811 {}] zookeeper.ZKUtil(111): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b413a53992,43811,1731342350126 2024-11-11T16:25:51,736 WARN [RS:2;16b413a53992:43811 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T16:25:51,736 INFO [RS:2;16b413a53992:43811 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:25:51,737 DEBUG [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126 2024-11-11T16:25:51,784 INFO [RS:0;16b413a53992:43519 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T16:25:51,788 INFO [RS:1;16b413a53992:42465 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T16:25:51,813 INFO [RS:2;16b413a53992:43811 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T16:25:51,825 INFO [RS:2;16b413a53992:43811 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T16:25:51,835 INFO [RS:1;16b413a53992:42465 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T16:25:51,833 INFO [RS:0;16b413a53992:43519 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T16:25:51,839 INFO [RS:2;16b413a53992:43811 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T16:25:51,840 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T16:25:51,842 INFO [RS:1;16b413a53992:42465 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T16:25:51,842 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,847 INFO [RS:0;16b413a53992:43519 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T16:25:51,847 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,861 INFO [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T16:25:51,861 INFO [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T16:25:51,868 INFO [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T16:25:51,871 INFO [RS:1;16b413a53992:42465 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T16:25:51,873 INFO [RS:0;16b413a53992:43519 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T16:25:51,873 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,873 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-11T16:25:51,873 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,873 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,874 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,874 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,874 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,874 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,874 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,874 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,874 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,874 INFO [RS:2;16b413a53992:43811 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T16:25:51,874 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0, corePoolSize=2, maxPoolSize=2 2024-11-11T16:25:51,875 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-11T16:25:51,875 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,875 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,875 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,875 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,875 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,875 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,875 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,876 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,876 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,876 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,876 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,876 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0, corePoolSize=2, maxPoolSize=2 2024-11-11T16:25:51,876 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:25:51,876 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,876 DEBUG [RS:0;16b413a53992:43519 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:25:51,876 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,876 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,877 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,877 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,877 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,877 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:25:51,877 DEBUG [RS:2;16b413a53992:43811 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:25:51,877 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,878 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0, corePoolSize=2, maxPoolSize=2 2024-11-11T16:25:51,878 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,878 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,878 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,878 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,878 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,878 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:25:51,879 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:25:51,879 DEBUG [RS:1;16b413a53992:42465 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:25:51,896 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,897 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,898 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T16:25:51,898 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,898 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,898 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,43519,1731342349897-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T16:25:51,917 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,917 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,917 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,917 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,917 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,917 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,43811,1731342350126-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T16:25:51,952 INFO [RS:0;16b413a53992:43519 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T16:25:51,956 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,43519,1731342349897-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,956 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,957 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,957 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,957 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,957 INFO [RS:0;16b413a53992:43519 {}] regionserver.Replication(171): 16b413a53992,43519,1731342349897 started 2024-11-11T16:25:51,957 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,957 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,957 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,42465,1731342350046-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-11T16:25:51,962 INFO [RS:2;16b413a53992:43811 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T16:25:51,962 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,43811,1731342350126-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,962 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,963 INFO [RS:2;16b413a53992:43811 {}] regionserver.Replication(171): 16b413a53992,43811,1731342350126 started 2024-11-11T16:25:51,987 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:51,988 INFO [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(1482): Serving as 16b413a53992,43811,1731342350126, RpcServer on 16b413a53992/172.17.0.2:43811, sessionid=0x1002fa9b94b0003 2024-11-11T16:25:51,989 DEBUG [RS:2;16b413a53992:43811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T16:25:51,989 DEBUG [RS:2;16b413a53992:43811 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b413a53992,43811,1731342350126 2024-11-11T16:25:51,989 DEBUG [RS:2;16b413a53992:43811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,43811,1731342350126' 2024-11-11T16:25:51,990 DEBUG [RS:2;16b413a53992:43811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T16:25:52,000 DEBUG [RS:2;16b413a53992:43811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T16:25:52,001 DEBUG [RS:2;16b413a53992:43811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T16:25:52,001 DEBUG [RS:2;16b413a53992:43811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T16:25:52,001 DEBUG [RS:2;16b413a53992:43811 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b413a53992,43811,1731342350126 2024-11-11T16:25:52,001 DEBUG [RS:2;16b413a53992:43811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,43811,1731342350126' 2024-11-11T16:25:52,001 DEBUG [RS:2;16b413a53992:43811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T16:25:52,002 INFO [RS:1;16b413a53992:42465 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T16:25:52,002 DEBUG [RS:2;16b413a53992:43811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T16:25:52,002 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,42465,1731342350046-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:52,003 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T16:25:52,003 INFO [RS:1;16b413a53992:42465 {}] regionserver.Replication(171): 16b413a53992,42465,1731342350046 started 2024-11-11T16:25:52,004 DEBUG [RS:2;16b413a53992:43811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T16:25:52,004 INFO [RS:2;16b413a53992:43811 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T16:25:52,004 INFO [RS:2;16b413a53992:43811 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T16:25:52,015 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:52,015 INFO [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(1482): Serving as 16b413a53992,43519,1731342349897, RpcServer on 16b413a53992/172.17.0.2:43519, sessionid=0x1002fa9b94b0001 2024-11-11T16:25:52,016 DEBUG [RS:0;16b413a53992:43519 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T16:25:52,016 DEBUG [RS:0;16b413a53992:43519 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b413a53992,43519,1731342349897 2024-11-11T16:25:52,016 DEBUG [RS:0;16b413a53992:43519 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,43519,1731342349897' 2024-11-11T16:25:52,016 DEBUG [RS:0;16b413a53992:43519 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T16:25:52,017 DEBUG [RS:0;16b413a53992:43519 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T16:25:52,019 DEBUG [RS:0;16b413a53992:43519 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T16:25:52,019 DEBUG [RS:0;16b413a53992:43519 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T16:25:52,019 DEBUG [RS:0;16b413a53992:43519 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b413a53992,43519,1731342349897 2024-11-11T16:25:52,019 DEBUG [RS:0;16b413a53992:43519 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,43519,1731342349897' 2024-11-11T16:25:52,019 DEBUG [RS:0;16b413a53992:43519 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T16:25:52,021 DEBUG [RS:0;16b413a53992:43519 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T16:25:52,022 DEBUG [RS:0;16b413a53992:43519 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T16:25:52,022 INFO [RS:0;16b413a53992:43519 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T16:25:52,022 INFO [RS:0;16b413a53992:43519 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T16:25:52,028 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T16:25:52,028 INFO [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(1482): Serving as 16b413a53992,42465,1731342350046, RpcServer on 16b413a53992/172.17.0.2:42465, sessionid=0x1002fa9b94b0002 2024-11-11T16:25:52,029 DEBUG [RS:1;16b413a53992:42465 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T16:25:52,029 DEBUG [RS:1;16b413a53992:42465 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b413a53992,42465,1731342350046 2024-11-11T16:25:52,029 DEBUG [RS:1;16b413a53992:42465 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,42465,1731342350046' 2024-11-11T16:25:52,029 DEBUG [RS:1;16b413a53992:42465 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T16:25:52,033 DEBUG [RS:1;16b413a53992:42465 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T16:25:52,034 DEBUG [RS:1;16b413a53992:42465 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T16:25:52,034 DEBUG [RS:1;16b413a53992:42465 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T16:25:52,034 DEBUG [RS:1;16b413a53992:42465 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b413a53992,42465,1731342350046 2024-11-11T16:25:52,034 DEBUG [RS:1;16b413a53992:42465 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,42465,1731342350046' 2024-11-11T16:25:52,034 DEBUG [RS:1;16b413a53992:42465 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T16:25:52,035 DEBUG [RS:1;16b413a53992:42465 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T16:25:52,036 DEBUG [RS:1;16b413a53992:42465 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T16:25:52,036 INFO [RS:1;16b413a53992:42465 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T16:25:52,036 INFO [RS:1;16b413a53992:42465 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-11T16:25:52,050 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T16:25:52,051 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553 2024-11-11T16:25:52,111 INFO [RS:2;16b413a53992:43811 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:25:52,120 INFO [RS:2;16b413a53992:43811 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C43811%2C1731342350126, suffix=, logDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126, archiveDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/oldWALs, maxLogs=32 2024-11-11T16:25:52,123 INFO [RS:0;16b413a53992:43519 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:25:52,134 INFO [RS:0;16b413a53992:43519 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C43519%2C1731342349897, suffix=, logDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43519,1731342349897, archiveDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/oldWALs, maxLogs=32 2024-11-11T16:25:52,137 INFO [RS:1;16b413a53992:42465 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:25:52,142 INFO [RS:1;16b413a53992:42465 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C42465%2C1731342350046, suffix=, 
logDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,42465,1731342350046, archiveDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/oldWALs, maxLogs=32 2024-11-11T16:25:52,157 DEBUG [RS:2;16b413a53992:43811 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126/16b413a53992%2C43811%2C1731342350126.1731342352124, exclude list is [], retry=0 2024-11-11T16:25:52,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741832_1008 (size=32) 2024-11-11T16:25:52,168 DEBUG [RS:0;16b413a53992:43519 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43519,1731342349897/16b413a53992%2C43519%2C1731342349897.1731342352137, exclude list is [], retry=0 2024-11-11T16:25:52,172 DEBUG [RS:1;16b413a53992:42465 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,42465,1731342350046/16b413a53992%2C42465%2C1731342350046.1731342352145, exclude list is [], retry=0 2024-11-11T16:25:52,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741832_1008 (size=32) 2024-11-11T16:25:52,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741832_1008 (size=32) 2024-11-11T16:25:52,181 WARN [IPC Server handler 0 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:52,181 WARN [IPC Server handler 0 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:52,181 WARN [IPC Server handler 0 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:52,184 WARN [IPC Server handler 4 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:52,185 WARN [IPC Server handler 4 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:52,185 WARN [IPC Server handler 4 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:52,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:52,188 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:52,184 WARN [IPC Server handler 2 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:52,188 WARN [IPC Server handler 2 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:52,189 WARN [IPC Server handler 2 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:52,193 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:25:52,195 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 
127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:52,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:52,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:52,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:52,205 INFO [RS:2;16b413a53992:43811 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126/16b413a53992%2C43811%2C1731342350126.1731342352124 2024-11-11T16:25:52,209 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T16:25:52,212 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T16:25:52,212 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:52,216 INFO [RS:1;16b413a53992:42465 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,42465,1731342350046/16b413a53992%2C42465%2C1731342350046.1731342352145 2024-11-11T16:25:52,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:25:52,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T16:25:52,219 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T16:25:52,219 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:52,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:25:52,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T16:25:52,223 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T16:25:52,224 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:52,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:25:52,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T16:25:52,226 INFO [RS:0;16b413a53992:43519 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43519,1731342349897/16b413a53992%2C43519%2C1731342349897.1731342352137 2024-11-11T16:25:52,228 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T16:25:52,228 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:52,229 DEBUG [RS:2;16b413a53992:43811 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:25:52,229 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:25:52,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T16:25:52,230 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740 2024-11-11T16:25:52,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740 2024-11-11T16:25:52,233 DEBUG [RS:1;16b413a53992:42465 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:25:52,235 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T16:25:52,235 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T16:25:52,236 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
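The FlushLargeStoresPolicy entry above notes that hbase:meta has no hbase.hregion.percolumnfamilyflush.size.lower.bound set and therefore falls back to the memstore flush size divided by the number of families (32.0 M here). A minimal sketch of setting that property on a table descriptor through the public HBase client API follows; the table name, family name, and 16 MB value are illustrative only and are not taken from the test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundExample {
      // Builds a descriptor that pins the per-column-family flush lower bound,
      // so FlushLargeStoresPolicy does not have to derive it from the flush size.
      static TableDescriptor withExplicitLowerBound() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("exampleTable"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
            // Property name as printed in the log message above; 16 MB is an arbitrary choice.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16 * 1024 * 1024))
            .build();
      }
    }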
2024-11-11T16:25:52,236 DEBUG [RS:0;16b413a53992:43519 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:25:52,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T16:25:52,247 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:25:52,248 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66497935, jitterRate=-0.009103551506996155}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T16:25:52,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731342352193Initializing all the Stores at 1731342352197 (+4 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342352198 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342352205 (+7 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342352205Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342352205Cleaning up temporary data from old regions at 1731342352235 (+30 ms)Region opened successfully at 1731342352250 (+15 ms) 2024-11-11T16:25:52,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T16:25:52,250 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T16:25:52,250 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T16:25:52,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T16:25:52,251 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T16:25:52,253 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T16:25:52,253 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731342352250Disabling compacts and flushes for region at 1731342352250Disabling writes for close at 1731342352251 (+1 ms)Writing region close event to WAL at 1731342352253 (+2 ms)Closed at 1731342352253 2024-11-11T16:25:52,258 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T16:25:52,259 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T16:25:52,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T16:25:52,281 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T16:25:52,285 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T16:25:52,355 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:52,355 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:52,355 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:52,356 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:52,356 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, 
storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:52,356 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:52,356 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:52,357 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:52,357 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:52,357 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:52,357 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:52,358 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:52,439 DEBUG 
[16b413a53992:40215 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-11T16:25:52,449 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(204): Hosts are {16b413a53992=0} racks are {/default-rack=0} 2024-11-11T16:25:52,463 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T16:25:52,463 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T16:25:52,463 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-11T16:25:52,463 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T16:25:52,463 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T16:25:52,463 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-11T16:25:52,463 INFO [16b413a53992:40215 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T16:25:52,463 INFO [16b413a53992:40215 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T16:25:52,463 INFO [16b413a53992:40215 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-11T16:25:52,463 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T16:25:52,489 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=16b413a53992,43811,1731342350126 2024-11-11T16:25:52,503 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b413a53992,43811,1731342350126, state=OPENING 2024-11-11T16:25:52,509 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T16:25:52,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:52,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:52,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:52,512 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:25:52,513 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:25:52,513 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:25:52,514 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:25:52,514 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path 
/hbase/meta-region-server: CHANGED 2024-11-11T16:25:52,517 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T16:25:52,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=16b413a53992,43811,1731342350126}] 2024-11-11T16:25:52,705 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T16:25:52,709 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46537, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T16:25:52,723 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T16:25:52,724 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:25:52,724 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-11T16:25:52,728 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C43811%2C1731342350126.meta, suffix=.meta, logDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126, archiveDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/oldWALs, maxLogs=32 2024-11-11T16:25:52,746 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta, exclude list is [], retry=0 2024-11-11T16:25:52,749 WARN [IPC Server handler 4 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:52,750 WARN [IPC Server handler 4 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:52,750 WARN [IPC Server handler 4 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, 
storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:52,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:52,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:52,756 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta 2024-11-11T16:25:52,756 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:25:52,757 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:25:52,759 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T16:25:52,762 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T16:25:52,769 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
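The recurring "Failed to place enough replicas" warnings point at two HDFS loggers to raise to DEBUG for more detail. In a test run that would normally be done in the log4j2 properties file; a programmatic equivalent using the Log4j2 Configurator (a sketch, assuming log4j-core is on the classpath) looks like this:

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class EnableBlockPlacementDebug {
      // Raises the two loggers named in the warnings above to DEBUG in the current
      // LoggerContext; levels set this way last until the configuration is reloaded.
      public static void enable() {
        Configurator.setLevel(
            "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
      }
    }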
2024-11-11T16:25:52,774 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T16:25:52,775 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:25:52,775 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T16:25:52,775 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T16:25:52,779 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T16:25:52,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T16:25:52,781 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:52,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:25:52,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T16:25:52,784 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T16:25:52,784 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:52,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:25:52,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T16:25:52,787 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T16:25:52,787 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:52,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:25:52,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T16:25:52,790 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T16:25:52,790 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:52,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
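The HStore and cacheConfig entries above report the effective per-family settings as each hbase:meta family is opened (block cache on, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom filters). A sketch of expressing that kind of configuration for an ordinary table family with the public client API is shown below; the 8 KB block size mirrors what the log prints, and nothing else is taken from the actual meta table definition.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InfoLikeFamilyExample {
      // A column family configured the way the cacheConfig/HStore lines describe
      // the meta 'info' family: cached, in-memory, ROW_INDEX_V1 encoded, ROWCOL blooms.
      static ColumnFamilyDescriptor build() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setBlockCacheEnabled(true)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build();
      }
    }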
2024-11-11T16:25:52,791 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T16:25:52,793 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740 2024-11-11T16:25:52,797 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740 2024-11-11T16:25:52,800 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T16:25:52,800 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T16:25:52,801 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T16:25:52,804 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T16:25:52,806 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60971349, jitterRate=-0.0914561003446579}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T16:25:52,806 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T16:25:52,808 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731342352776Writing region info on filesystem at 1731342352776Initializing all the Stores at 1731342352778 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342352778Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342352779 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342352779Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342352779Cleaning up temporary data from old regions at 1731342352800 (+21 ms)Running coprocessor post-open hooks at 1731342352806 (+6 ms)Region opened successfully at 1731342352808 (+2 ms) 2024-11-11T16:25:52,817 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731342352694 2024-11-11T16:25:52,833 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T16:25:52,834 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T16:25:52,836 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=16b413a53992,43811,1731342350126 2024-11-11T16:25:52,839 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b413a53992,43811,1731342350126, state=OPEN 2024-11-11T16:25:52,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:25:52,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:25:52,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:25:52,849 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:25:52,849 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:25:52,849 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:25:52,849 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=16b413a53992,43811,1731342350126 2024-11-11T16:25:52,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:25:52,853 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:25:52,864 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T16:25:52,864 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=16b413a53992,43811,1731342350126 in 331 msec 2024-11-11T16:25:52,869 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T16:25:52,870 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 596 msec 2024-11-11T16:25:52,871 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T16:25:52,871 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T16:25:52,902 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T16:25:52,904 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1] 2024-11-11T16:25:52,929 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T16:25:52,932 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49601, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T16:25:52,955 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.5620 sec 2024-11-11T16:25:52,955 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731342352955, completionTime=-1 2024-11-11T16:25:52,959 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-11T16:25:52,959 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
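The ConnectionUtils entries above ("Start fetching meta region location from registry" followed by the fetched location) describe an ordinary client lookup of where hbase:meta is deployed. A minimal client-side sketch of the same lookup, assuming the public HBase client API and using the ZooKeeper quorum printed earlier in the log as a placeholder, is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLocationLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1:59036"); // quorum as printed in the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Locate the region of hbase:meta that serves the empty start row.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
          System.out.println("hbase:meta is served by " + loc.getServerName());
        }
      }
    }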
2024-11-11T16:25:53,007 INFO [master/16b413a53992:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-11T16:25:53,007 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731342413007 2024-11-11T16:25:53,007 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731342473007 2024-11-11T16:25:53,007 INFO [master/16b413a53992:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 48 msec 2024-11-11T16:25:53,010 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-11T16:25:53,031 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,40215,1731342348830-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:53,031 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,40215,1731342348830-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:53,032 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,40215,1731342348830-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:53,068 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-16b413a53992:40215, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:53,069 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:53,084 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:53,091 DEBUG [master/16b413a53992:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T16:25:53,130 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.900sec 2024-11-11T16:25:53,132 INFO [master/16b413a53992:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T16:25:53,134 INFO [master/16b413a53992:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T16:25:53,135 INFO [master/16b413a53992:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T16:25:53,136 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
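The ChoreService entries above enable the master's background chores (balancer, normalizer, catalog janitor, and so on) at fixed periods. A bare-bones chore in that same framework could look like the sketch below, assuming the ScheduledChore constructor that takes a name, a Stoppable, and a period in the default millisecond unit; the chore name and period are made up for illustration.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ExampleChore extends ScheduledChore {
      ExampleChore(Stoppable stopper) {
        super("ExampleChore", stopper, 60_000); // name, stopper, period (milliseconds by default)
      }

      @Override
      protected void chore() {
        // Periodic work goes here; the ChoreService invokes it once per period.
      }

      static void schedule(Stoppable stopper) {
        new ChoreService("example").scheduleChore(new ExampleChore(stopper));
      }
    }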
2024-11-11T16:25:53,136 INFO [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T16:25:53,137 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,40215,1731342348830-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T16:25:53,138 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,40215,1731342348830-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T16:25:53,145 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T16:25:53,146 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T16:25:53,146 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,40215,1731342348830-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:25:53,183 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73581c7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T16:25:53,184 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 16b413a53992,40215,-1 for getting cluster id 2024-11-11T16:25:53,188 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T16:25:53,199 DEBUG [HMaster-EventLoopGroup-2-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'b4307b4d-0d23-478f-a331-4653ccc7b610' 2024-11-11T16:25:53,202 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T16:25:53,203 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "b4307b4d-0d23-478f-a331-4653ccc7b610" 2024-11-11T16:25:53,203 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37f2d540, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T16:25:53,203 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [16b413a53992,40215,-1] 2024-11-11T16:25:53,206 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T16:25:53,209 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:25:53,211 INFO [HMaster-EventLoopGroup-2-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38828, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T16:25:53,214 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3334605f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T16:25:53,215 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T16:25:53,226 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1] 2024-11-11T16:25:53,227 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T16:25:53,230 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49836, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T16:25:53,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=16b413a53992,40215,1731342348830 2024-11-11T16:25:53,255 INFO [Time-limited test {}] wal.AbstractTestWALReplay(147): hbase.rootdir=hdfs://localhost:39605/hbase 2024-11-11T16:25:53,277 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=349, OpenFileDescriptor=577, MaxFileDescriptor=1048576, SystemLoadAverage=692, ProcessCount=11, AvailableMemoryMB=3400 2024-11-11T16:25:53,299 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:25:53,303 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:25:53,304 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:25:53,310 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-61595226, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/hregion-61595226, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:25:53,333 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-61595226/hregion-61595226.1731342353311, exclude list is [], retry=0 2024-11-11T16:25:53,341 DEBUG [AsyncFSWAL-8-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:25:53,342 DEBUG [AsyncFSWAL-8-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:53,343 DEBUG [AsyncFSWAL-8-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:53,351 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-61595226/hregion-61595226.1731342353311 2024-11-11T16:25:53,353 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:25:53,353 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 383dc1b1e5356ff54781671f7594686c, NAME => 'testReplayEditsWrittenIntoWAL,,1731342353300.383dc1b1e5356ff54781671f7594686c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39605/hbase 2024-11-11T16:25:53,359 WARN [IPC Server handler 0 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:53,359 WARN [IPC Server handler 0 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:53,359 WARN [IPC Server handler 0 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:53,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741838_1014 (size=64) 2024-11-11T16:25:53,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741838_1014 (size=64) 2024-11-11T16:25:53,371 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731342353300.383dc1b1e5356ff54781671f7594686c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:25:53,373 INFO [StoreOpener-383dc1b1e5356ff54781671f7594686c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,376 INFO [StoreOpener-383dc1b1e5356ff54781671f7594686c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major 
jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 383dc1b1e5356ff54781671f7594686c columnFamilyName a 2024-11-11T16:25:53,377 DEBUG [StoreOpener-383dc1b1e5356ff54781671f7594686c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:53,378 INFO [StoreOpener-383dc1b1e5356ff54781671f7594686c-1 {}] regionserver.HStore(327): Store=383dc1b1e5356ff54781671f7594686c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:53,378 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,379 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,380 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,380 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,380 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,383 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,388 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:25:53,389 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 383dc1b1e5356ff54781671f7594686c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63707814, jitterRate=-0.05067959427833557}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T16:25:53,390 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 383dc1b1e5356ff54781671f7594686c: Writing region info on filesystem at 1731342353371Initializing all the Stores at 1731342353373 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342353373Cleaning up temporary data from old regions at 1731342353380 (+7 ms)Region opened successfully at 1731342353390 (+10 ms) 2024-11-11T16:25:53,390 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 383dc1b1e5356ff54781671f7594686c, disabling compactions & flushes 2024-11-11T16:25:53,391 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region 
testReplayEditsWrittenIntoWAL,,1731342353300.383dc1b1e5356ff54781671f7594686c. 2024-11-11T16:25:53,391 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731342353300.383dc1b1e5356ff54781671f7594686c. 2024-11-11T16:25:53,391 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731342353300.383dc1b1e5356ff54781671f7594686c. after waiting 0 ms 2024-11-11T16:25:53,391 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731342353300.383dc1b1e5356ff54781671f7594686c. 2024-11-11T16:25:53,391 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731342353300.383dc1b1e5356ff54781671f7594686c. 2024-11-11T16:25:53,391 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 383dc1b1e5356ff54781671f7594686c: Waiting for close lock at 1731342353390Disabling compacts and flushes for region at 1731342353390Disabling writes for close at 1731342353391 (+1 ms)Writing region close event to WAL at 1731342353391Closed at 1731342353391 2024-11-11T16:25:53,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741837_1013 (size=95) 2024-11-11T16:25:53,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741837_1013 (size=95) 2024-11-11T16:25:53,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741837_1013 (size=95) 2024-11-11T16:25:53,406 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:25:53,406 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-61595226:(num 1731342353311) 2024-11-11T16:25:53,408 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-11T16:25:53,415 WARN [IPC Server handler 0 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:53,415 WARN [IPC Server handler 0 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:53,416 WARN [IPC Server handler 0 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:53,427 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741839_1015 (size=320) 2024-11-11T16:25:53,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741839_1015 (size=320) 2024-11-11T16:25:53,430 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-11T16:25:53,433 WARN [IPC Server handler 0 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:53,433 WARN [IPC Server handler 0 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:53,433 WARN [IPC Server handler 0 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:53,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741840_1016 (size=253) 2024-11-11T16:25:53,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741840_1016 (size=253) 2024-11-11T16:25:53,479 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-1, size=320 (320bytes) 2024-11-11T16:25:53,480 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-11T16:25:53,480 DEBUG [Time-limited test {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-11T16:25:53,481 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-1 2024-11-11T16:25:53,488 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-1 after 5ms 2024-11-11T16:25:53,495 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:25:53,496 INFO [Time-limited test {}] wal.WALSplitter(310): Open 
hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-1 took 19ms 2024-11-11T16:25:53,512 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-1 so closing down 2024-11-11T16:25:53,512 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:25:53,517 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-11-11T16:25:53,519 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000001-wal-1.temp 2024-11-11T16:25:53,521 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:25:53,523 WARN [IPC Server handler 3 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:25:53,523 WARN [IPC Server handler 3 on default port 39605 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:25:53,523 WARN [IPC Server handler 3 on default port 39605 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:25:53,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741841_1017 (size=320) 2024-11-11T16:25:53,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741841_1017 (size=320) 2024-11-11T16:25:53,556 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-11-11T16:25:53,559 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002 2024-11-11T16:25:53,564 INFO 
[Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 62 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-11-11T16:25:53,565 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-1, journal: Splitting hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-1, size=320 (320bytes) at 1731342353479Finishing writing output for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-1 so closing down at 1731342353512 (+33 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000001-wal-1.temp at 1731342353519 (+7 ms)3 split writer threads finished at 1731342353521 (+2 ms)Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1731342353556 (+35 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002 at 1731342353559 (+3 ms)Processed 2 edits across 1 Regions in 62 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1731342353565 (+6 ms) 2024-11-11T16:25:53,581 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-2, size=253 (253bytes) 2024-11-11T16:25:53,581 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-2 2024-11-11T16:25:53,582 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-2 after 1ms 2024-11-11T16:25:53,587 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:25:53,587 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-2 took 6ms 2024-11-11T16:25:53,592 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-2 so closing down 2024-11-11T16:25:53,592 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:25:53,595 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-11-11T16:25:53,598 INFO [Time-limited test-Writer-0 {}] 
wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002-wal-2.temp 2024-11-11T16:25:53,598 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:25:53,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741842_1018 (size=253) 2024-11-11T16:25:53,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741842_1018 (size=253) 2024-11-11T16:25:53,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741842_1018 (size=253) 2024-11-11T16:25:53,644 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-11-11T16:25:53,650 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:25:53,653 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(184): Found existing old edits file and we have less entries. Deleting hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002-wal-2.temp, length=253 2024-11-11T16:25:53,656 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 67 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-11-11T16:25:53,656 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-2, journal: Splitting hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-2, size=253 (253bytes) at 1731342353581Finishing writing output for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-2 so closing down at 1731342353592 (+11 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002-wal-2.temp at 1731342353598 (+6 ms)3 split writer threads finished at 1731342353598Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1731342353644 (+46 ms)Processed 1 edits across 1 Regions in 67 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1731342353656 (+12 ms) 2024-11-11T16:25:53,656 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 
monitor 2024-11-11T16:25:53,659 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:25:53,678 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal.1731342353660, exclude list is [], retry=0 2024-11-11T16:25:53,684 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:53,685 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:53,685 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:25:53,693 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal.1731342353660 2024-11-11T16:25:53,694 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387)] 2024-11-11T16:25:53,694 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 383dc1b1e5356ff54781671f7594686c, NAME => 'testReplayEditsWrittenIntoWAL,,1731342353300.383dc1b1e5356ff54781671f7594686c.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:25:53,694 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731342353300.383dc1b1e5356ff54781671f7594686c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:25:53,694 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,694 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,698 INFO [StoreOpener-383dc1b1e5356ff54781671f7594686c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,699 INFO [StoreOpener-383dc1b1e5356ff54781671f7594686c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 383dc1b1e5356ff54781671f7594686c columnFamilyName a 2024-11-11T16:25:53,699 DEBUG [StoreOpener-383dc1b1e5356ff54781671f7594686c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:53,704 INFO [StoreOpener-383dc1b1e5356ff54781671f7594686c-1 {}] regionserver.HStore(327): Store=383dc1b1e5356ff54781671f7594686c/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:53,704 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,706 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,709 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,713 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002 2024-11-11T16:25:53,718 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:25:53,728 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002 2024-11-11T16:25:53,732 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 383dc1b1e5356ff54781671f7594686c 1/1 column families, dataSize=108 B heapSize=512 B 2024-11-11T16:25:53,799 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/.tmp/a/32107566cf484ac5b55a9512b2d5b8fb is 58, key is testReplayEditsWrittenIntoWAL/a:1/1731342353406/Put/seqid=0 2024-11-11T16:25:53,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741844_1020 (size=5170) 2024-11-11T16:25:53,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741844_1020 (size=5170) 2024-11-11T16:25:53,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741844_1020 (size=5170) 2024-11-11T16:25:53,833 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), 
to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/.tmp/a/32107566cf484ac5b55a9512b2d5b8fb 2024-11-11T16:25:53,900 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/.tmp/a/32107566cf484ac5b55a9512b2d5b8fb as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/a/32107566cf484ac5b55a9512b2d5b8fb 2024-11-11T16:25:53,914 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/a/32107566cf484ac5b55a9512b2d5b8fb, entries=2, sequenceid=2, filesize=5.0 K 2024-11-11T16:25:53,921 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 383dc1b1e5356ff54781671f7594686c in 187ms, sequenceid=2, compaction requested=false; wal=null 2024-11-11T16:25:53,923 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/0000000000000000002 2024-11-11T16:25:53,924 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,925 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,930 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 383dc1b1e5356ff54781671f7594686c 2024-11-11T16:25:53,939 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/383dc1b1e5356ff54781671f7594686c/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-11-11T16:25:53,941 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 383dc1b1e5356ff54781671f7594686c; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66025913, jitterRate=-0.016137227416038513}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T16:25:53,943 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 383dc1b1e5356ff54781671f7594686c: Writing region info on filesystem at 1731342353695Initializing all the Stores at 1731342353697 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342353697Obtaining lock to block concurrent updates at 1731342353732 (+35 ms)Preparing flush snapshotting stores in 383dc1b1e5356ff54781671f7594686c at 1731342353732Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1731342353300.383dc1b1e5356ff54781671f7594686c., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1731342353736 (+4 ms)Flushing stores of testReplayEditsWrittenIntoWAL,,1731342353300.383dc1b1e5356ff54781671f7594686c. 
at 1731342353736Flushing 383dc1b1e5356ff54781671f7594686c/a: creating writer at 1731342353737 (+1 ms)Flushing 383dc1b1e5356ff54781671f7594686c/a: appending metadata at 1731342353783 (+46 ms)Flushing 383dc1b1e5356ff54781671f7594686c/a: closing flushed file at 1731342353786 (+3 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65de1a9d: reopening flushed file at 1731342353898 (+112 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 383dc1b1e5356ff54781671f7594686c in 187ms, sequenceid=2, compaction requested=false; wal=null at 1731342353921 (+23 ms)Cleaning up temporary data from old regions at 1731342353925 (+4 ms)Region opened successfully at 1731342353943 (+18 ms) 2024-11-11T16:25:53,997 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit0 Thread=361 (was 349) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:47108 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:39605/hbase-prefix:default java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:38940 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: TestAsyncWALReplay-pool-0 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:47116 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:47190 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:47126 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741843_1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-8-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741843_1019, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:38872 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=673 (was 577) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=692 (was 692), ProcessCount=11 (was 11), AvailableMemoryMB=3380 (was 3400) 2024-11-11T16:25:54,015 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=361, OpenFileDescriptor=673, MaxFileDescriptor=1048576, SystemLoadAverage=692, ProcessCount=11, AvailableMemoryMB=3379 2024-11-11T16:25:54,040 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:25:54,044 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:25:54,047 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:25:54,052 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-28056233, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/hregion-28056233, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:25:54,074 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-28056233/hregion-28056233.1731342354053, exclude list is [], retry=0 2024-11-11T16:25:54,080 DEBUG [AsyncFSWAL-10-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:54,085 DEBUG [AsyncFSWAL-10-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:25:54,087 DEBUG [AsyncFSWAL-10-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:54,119 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-28056233/hregion-28056233.1731342354053 2024-11-11T16:25:54,121 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969)] 2024-11-11T16:25:54,121 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 753b23e4a659b4f590c97a83346286a5, NAME => 'testReplayEditsWrittenIntoWAL,,1731342354041.753b23e4a659b4f590c97a83346286a5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39605/hbase 2024-11-11T16:25:54,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741846_1022 (size=64) 2024-11-11T16:25:54,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741846_1022 (size=64) 2024-11-11T16:25:54,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741846_1022 (size=64) 2024-11-11T16:25:54,179 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731342354041.753b23e4a659b4f590c97a83346286a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:25:54,181 INFO [StoreOpener-753b23e4a659b4f590c97a83346286a5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,185 INFO [StoreOpener-753b23e4a659b4f590c97a83346286a5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 753b23e4a659b4f590c97a83346286a5 columnFamilyName a 2024-11-11T16:25:54,185 DEBUG [StoreOpener-753b23e4a659b4f590c97a83346286a5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:54,185 INFO [StoreOpener-753b23e4a659b4f590c97a83346286a5-1 {}] regionserver.HStore(327): Store=753b23e4a659b4f590c97a83346286a5/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:54,186 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,187 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,187 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,188 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,188 DEBUG [Time-limited test {}] 
regionserver.HRegion(1060): Cleaning up temporary data for 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,191 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,196 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:25:54,197 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 753b23e4a659b4f590c97a83346286a5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60302940, jitterRate=-0.10141617059707642}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T16:25:54,197 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 753b23e4a659b4f590c97a83346286a5: Writing region info on filesystem at 1731342354179Initializing all the Stores at 1731342354180 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342354181 (+1 ms)Cleaning up temporary data from old regions at 1731342354188 (+7 ms)Region opened successfully at 1731342354197 (+9 ms) 2024-11-11T16:25:54,198 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 753b23e4a659b4f590c97a83346286a5, disabling compactions & flushes 2024-11-11T16:25:54,198 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1731342354041.753b23e4a659b4f590c97a83346286a5. 2024-11-11T16:25:54,198 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731342354041.753b23e4a659b4f590c97a83346286a5. 2024-11-11T16:25:54,198 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731342354041.753b23e4a659b4f590c97a83346286a5. after waiting 0 ms 2024-11-11T16:25:54,198 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731342354041.753b23e4a659b4f590c97a83346286a5. 2024-11-11T16:25:54,201 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731342354041.753b23e4a659b4f590c97a83346286a5. 
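[Annotation, not part of the captured log] Every region created in this run uses the same single-family descriptor that the HRegion(7572) entries print: family 'a', VERSIONS => '1', no compression or block encoding, defaults for the rest. As a hedged sketch only (the table name and family come from the log; the builder usage assumes the standard HBase 2.x client API is on the classpath), such a descriptor would typically be built like this:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class SingleFamilyDescriptorSketch {
      // Builds a descriptor equivalent to the one logged for testReplayEditsWrittenIntoWAL:
      // one column family 'a', at most one version kept, defaults everywhere else.
      static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsWrittenIntoWAL"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("a"))
                .setMaxVersions(1)
                .build())
            .build();
      }
    }
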
2024-11-11T16:25:54,201 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 753b23e4a659b4f590c97a83346286a5: Waiting for close lock at 1731342354198Disabling compacts and flushes for region at 1731342354198Disabling writes for close at 1731342354198Writing region close event to WAL at 1731342354201 (+3 ms)Closed at 1731342354201 2024-11-11T16:25:54,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741845_1021 (size=95) 2024-11-11T16:25:54,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741845_1021 (size=95) 2024-11-11T16:25:54,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741845_1021 (size=95) 2024-11-11T16:25:54,216 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:25:54,216 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-28056233:(num 1731342354053) 2024-11-11T16:25:54,218 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-11T16:25:54,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741847_1023 (size=320) 2024-11-11T16:25:54,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741847_1023 (size=320) 2024-11-11T16:25:54,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741847_1023 (size=320) 2024-11-11T16:25:54,256 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor testMonitor 2024-11-11T16:25:54,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741848_1024 (size=253) 2024-11-11T16:25:54,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741848_1024 (size=253) 2024-11-11T16:25:54,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741848_1024 (size=253) 2024-11-11T16:25:54,313 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-2, size=253 (253bytes) 2024-11-11T16:25:54,313 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-2 2024-11-11T16:25:54,317 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-2 after 4ms 2024-11-11T16:25:54,337 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-2: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:25:54,338 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-2 took 26ms 
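[Annotation, not part of the captured log] The recovered-edits paths in these split entries follow one naming convention: each split writer creates <region dir>/recovered.edits/<first sequence id, zero-padded to 19 digits>-<wal file name>.temp and, when it closes, renames that file to the zero-padded highest sequence id it wrote (or deletes its own temp file when an existing edits file already covers at least as many edits, as the wal-2 case above shows). A small sketch of that naming, reconstructed only from the paths visible in the log; the class and method names below are illustrative helpers, not HBase API:

    import java.nio.file.Path;

    public final class RecoveredEditsNamingSketch {
      // Zero-pad a WAL sequence id to 19 digits, as in 0000000000000000002.
      static String pad(long seqId) {
        return String.format("%019d", seqId);
      }

      // Temp file the split writer appends to while the split is in progress,
      // e.g. .../recovered.edits/0000000000000000001-wal-1.temp
      static Path tempEditsFile(Path regionDir, long firstSeqId, String walName) {
        return regionDir.resolve("recovered.edits").resolve(pad(firstSeqId) + "-" + walName + ".temp");
      }

      // Final name after the writer closes, keyed by the highest sequence id written,
      // e.g. .../recovered.edits/0000000000000000002
      static Path finalEditsFile(Path regionDir, long maxSeqId) {
        return regionDir.resolve("recovered.edits").resolve(pad(maxSeqId));
      }
    }
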
2024-11-11T16:25:54,344 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-2 so closing down 2024-11-11T16:25:54,344 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:25:54,347 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000002-wal-2.temp 2024-11-11T16:25:54,349 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002-wal-2.temp 2024-11-11T16:25:54,349 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:25:54,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741849_1025 (size=253) 2024-11-11T16:25:54,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741849_1025 (size=253) 2024-11-11T16:25:54,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741849_1025 (size=253) 2024-11-11T16:25:54,386 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-11-11T16:25:54,388 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002 2024-11-11T16:25:54,389 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 1 edits across 1 Regions in 50 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-2, size=253, length=253, corrupted=false, cancelled=false 2024-11-11T16:25:54,389 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-2, journal: Splitting hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-2, size=253 (253bytes) at 1731342354313Finishing writing output for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-2 so closing down at 1731342354344 (+31 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002-wal-2.temp at 1731342354349 (+5 ms)3 split writer threads finished at 1731342354350 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002-wal-2.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1731342354386 (+36 ms)Rename recovered edits 
hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002-wal-2.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002 at 1731342354388 (+2 ms)Processed 1 edits across 1 Regions in 50 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-2, size=253, length=253, corrupted=false, cancelled=false at 1731342354389 (+1 ms) 2024-11-11T16:25:54,407 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-1, size=320 (320bytes) 2024-11-11T16:25:54,407 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-1 2024-11-11T16:25:54,408 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-1 after 1ms 2024-11-11T16:25:54,413 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-1: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:25:54,413 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-1 took 6ms 2024-11-11T16:25:54,416 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-1 so closing down 2024-11-11T16:25:54,417 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:25:54,419 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal-1.temp 2024-11-11T16:25:54,430 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000001-wal-1.temp 2024-11-11T16:25:54,431 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:25:54,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741850_1026 (size=320) 2024-11-11T16:25:54,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741850_1026 (size=320) 2024-11-11T16:25:54,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741850_1026 (size=320) 2024-11-11T16:25:54,461 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-11-11T16:25:54,466 DEBUG [split-log-closeStream-pool-0 {}] wal.AbstractProtobufWALReader(321): Initializing compression context for 
hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:25:54,469 WARN [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(175): Found existing old edits file. It could be the result of a previous failed split attempt or we have duplicated wal entries. Deleting hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002, length=253 2024-11-11T16:25:54,478 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002 2024-11-11T16:25:54,478 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 65 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-1, size=320, length=320, corrupted=false, cancelled=false 2024-11-11T16:25:54,478 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-1, journal: Splitting hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-1, size=320 (320bytes) at 1731342354407Finishing writing output for hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-1 so closing down at 1731342354416 (+9 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000001-wal-1.temp at 1731342354430 (+14 ms)3 split writer threads finished at 1731342354431 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000001-wal-1.temp (wrote 2 edits, skipped 0 edits in 0 ms) at 1731342354461 (+30 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000001-wal-1.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002 at 1731342354478 (+17 ms)Processed 2 edits across 1 Regions in 65 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal-1, size=320, length=320, corrupted=false, cancelled=false at 1731342354478 2024-11-11T16:25:54,479 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:25:54,481 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:25:54,497 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal.1731342354482, exclude list is [], retry=0 2024-11-11T16:25:54,503 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:25:54,503 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:54,504 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:54,513 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal.1731342354482 2024-11-11T16:25:54,516 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:34969:34969)] 2024-11-11T16:25:54,516 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 753b23e4a659b4f590c97a83346286a5, NAME => 'testReplayEditsWrittenIntoWAL,,1731342354041.753b23e4a659b4f590c97a83346286a5.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:25:54,516 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731342354041.753b23e4a659b4f590c97a83346286a5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:25:54,517 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,517 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,525 INFO [StoreOpener-753b23e4a659b4f590c97a83346286a5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,527 INFO [StoreOpener-753b23e4a659b4f590c97a83346286a5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 753b23e4a659b4f590c97a83346286a5 columnFamilyName a 2024-11-11T16:25:54,527 DEBUG [StoreOpener-753b23e4a659b4f590c97a83346286a5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:54,528 INFO [StoreOpener-753b23e4a659b4f590c97a83346286a5-1 {}] regionserver.HStore(327): Store=753b23e4a659b4f590c97a83346286a5/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:54,529 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,530 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,533 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,535 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002 2024-11-11T16:25:54,539 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:25:54,541 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 2, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=2, path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002 2024-11-11T16:25:54,541 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 753b23e4a659b4f590c97a83346286a5 1/1 column families, dataSize=108 B heapSize=512 B 2024-11-11T16:25:54,568 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/.tmp/a/07e735d1d5674afb9421018882ea6498 is 58, key is testReplayEditsWrittenIntoWAL/a:1/1731342354216/Put/seqid=0 2024-11-11T16:25:54,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741852_1028 (size=5170) 2024-11-11T16:25:54,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741852_1028 (size=5170) 2024-11-11T16:25:54,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741852_1028 (size=5170) 2024-11-11T16:25:54,590 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=108 B at sequenceid=2 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/.tmp/a/07e735d1d5674afb9421018882ea6498 2024-11-11T16:25:54,605 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/.tmp/a/07e735d1d5674afb9421018882ea6498 as 
hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/a/07e735d1d5674afb9421018882ea6498 2024-11-11T16:25:54,630 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/a/07e735d1d5674afb9421018882ea6498, entries=2, sequenceid=2, filesize=5.0 K 2024-11-11T16:25:54,631 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 753b23e4a659b4f590c97a83346286a5 in 90ms, sequenceid=2, compaction requested=false; wal=null 2024-11-11T16:25:54,633 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/0000000000000000002 2024-11-11T16:25:54,634 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,634 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,638 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 753b23e4a659b4f590c97a83346286a5 2024-11-11T16:25:54,644 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/753b23e4a659b4f590c97a83346286a5/recovered.edits/2.seqid, newMaxSeqId=2, maxSeqId=1 2024-11-11T16:25:54,646 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 753b23e4a659b4f590c97a83346286a5; next sequenceid=3; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61608931, jitterRate=-0.08195538818836212}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T16:25:54,647 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 753b23e4a659b4f590c97a83346286a5: Writing region info on filesystem at 1731342354517Initializing all the Stores at 1731342354519 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342354519Obtaining lock to block concurrent updates at 1731342354541 (+22 ms)Preparing flush snapshotting stores in 753b23e4a659b4f590c97a83346286a5 at 1731342354541Finished memstore snapshotting testReplayEditsWrittenIntoWAL,,1731342354041.753b23e4a659b4f590c97a83346286a5., syncing WAL and waiting on mvcc, flushsize=dataSize=108, getHeapSize=496, getOffHeapSize=0, getCellsCount=2 at 1731342354541Flushing stores of testReplayEditsWrittenIntoWAL,,1731342354041.753b23e4a659b4f590c97a83346286a5. 
at 1731342354541Flushing 753b23e4a659b4f590c97a83346286a5/a: creating writer at 1731342354542 (+1 ms)Flushing 753b23e4a659b4f590c97a83346286a5/a: appending metadata at 1731342354566 (+24 ms)Flushing 753b23e4a659b4f590c97a83346286a5/a: closing flushed file at 1731342354566Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@941ee67: reopening flushed file at 1731342354603 (+37 ms)Finished flush of dataSize ~108 B/108, heapSize ~496 B/496, currentSize=0 B/0 for 753b23e4a659b4f590c97a83346286a5 in 90ms, sequenceid=2, compaction requested=false; wal=null at 1731342354631 (+28 ms)Cleaning up temporary data from old regions at 1731342354634 (+3 ms)Region opened successfully at 1731342354646 (+12 ms) 2024-11-11T16:25:54,672 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testNameConflictWhenSplit1 Thread=371 (was 361) Potentially hanging thread: AsyncFSWAL-10-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:47108 [Waiting for operation #26] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:47116 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:47206 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:47250 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-10-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:39008 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741851_1027] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: 
BP-1916425677-172.17.0.2-1731342345074:blk_1073741851_1027, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=755 (was 673) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=692 (was 692), ProcessCount=11 (was 11), AvailableMemoryMB=3363 (was 3379) 2024-11-11T16:25:54,691 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=371, OpenFileDescriptor=755, MaxFileDescriptor=1048576, SystemLoadAverage=692, ProcessCount=11, AvailableMemoryMB=3362 2024-11-11T16:25:54,712 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:25:54,715 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:25:54,716 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:25:54,720 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-33971538, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/hregion-33971538, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:25:54,741 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-33971538/hregion-33971538.1731342354721, exclude list is [], retry=0 2024-11-11T16:25:54,746 DEBUG [AsyncFSWAL-12-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:54,747 DEBUG [AsyncFSWAL-12-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:54,758 DEBUG [AsyncFSWAL-12-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:25:54,782 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-33971538/hregion-33971538.1731342354721 2024-11-11T16:25:54,787 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387)] 2024-11-11T16:25:54,788 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 3753c95cc70b081016fdbd80fb1b3344, NAME => 'testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenIntoWAL', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39605/hbase 2024-11-11T16:25:54,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741854_1030 (size=64) 2024-11-11T16:25:54,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741854_1030 (size=64) 2024-11-11T16:25:54,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741854_1030 (size=64) 2024-11-11T16:25:54,903 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:25:54,906 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:54,909 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3753c95cc70b081016fdbd80fb1b3344 columnFamilyName a 2024-11-11T16:25:54,909 DEBUG [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:54,910 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(327): Store=3753c95cc70b081016fdbd80fb1b3344/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:54,910 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:54,913 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3753c95cc70b081016fdbd80fb1b3344 columnFamilyName b 2024-11-11T16:25:54,913 DEBUG [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:54,915 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(327): Store=3753c95cc70b081016fdbd80fb1b3344/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:54,915 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:54,923 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3753c95cc70b081016fdbd80fb1b3344 columnFamilyName c 2024-11-11T16:25:54,923 DEBUG [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:54,924 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(327): Store=3753c95cc70b081016fdbd80fb1b3344/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:54,924 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:54,929 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:54,930 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:54,932 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:54,932 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:54,933 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:25:54,936 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:54,949 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:25:54,950 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3753c95cc70b081016fdbd80fb1b3344; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59574202, jitterRate=-0.11227521300315857}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:25:54,951 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 3753c95cc70b081016fdbd80fb1b3344: Writing region info on filesystem at 1731342354903Initializing all the Stores at 1731342354905 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342354905Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342354906 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342354906Cleaning up temporary data from old regions at 1731342354932 (+26 ms)Region opened successfully at 1731342354950 (+18 ms) 2024-11-11T16:25:54,951 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 3753c95cc70b081016fdbd80fb1b3344, disabling compactions & flushes 2024-11-11T16:25:54,951 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344. 
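The FlushLargeStoresPolicy line above records flushSizeLowerBound=44739242 for this three-family region: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, the lower bound falls back to the region memstore flush size divided by the number of column families, i.e. 128 MB / 3, which the message prints as 42.7 M. A quick check of that arithmetic, assuming the stock 134217728-byte flush size; the snippet is only a worked example, not HBase code.

public class FlushLowerBoundCheck {
    public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // 134217728 bytes, the default region flush size
        int columnFamilies = 3;                      // families a, b and c in this table
        long lowerBound = memstoreFlushSize / columnFamilies;
        System.out.println(lowerBound);                              // 44739242, matching flushSizeLowerBound above
        System.out.printf("%.1f M%n", lowerBound / 1024.0 / 1024.0); // 42.7 M, matching the log message
    }
}
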
2024-11-11T16:25:54,951 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344. 2024-11-11T16:25:54,951 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344. after waiting 0 ms 2024-11-11T16:25:54,951 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344. 2024-11-11T16:25:54,952 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344. 2024-11-11T16:25:54,952 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 3753c95cc70b081016fdbd80fb1b3344: Waiting for close lock at 1731342354951Disabling compacts and flushes for region at 1731342354951Disabling writes for close at 1731342354951Writing region close event to WAL at 1731342354952 (+1 ms)Closed at 1731342354952 2024-11-11T16:25:54,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741853_1029 (size=95) 2024-11-11T16:25:54,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741853_1029 (size=95) 2024-11-11T16:25:54,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741853_1029 (size=95) 2024-11-11T16:25:54,976 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:25:54,976 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-33971538:(num 1731342354721) 2024-11-11T16:25:54,977 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:25:54,982 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:25:55,003 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983, exclude list is [], retry=0 2024-11-11T16:25:55,008 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:55,008 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:25:55,009 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:55,029 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL 
/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983 2024-11-11T16:25:55,033 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:25:55,499 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983, size=0 (0bytes) 2024-11-11T16:25:55,499 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983 might be still open, length is 0 2024-11-11T16:25:55,499 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983 2024-11-11T16:25:55,500 WARN [IPC Server handler 0 on default port 39605 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741855_1031 2024-11-11T16:25:55,509 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983 after 9ms 2024-11-11T16:25:56,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741827_1003 (size=196) 2024-11-11T16:25:56,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741828_1004 (size=1189) 2024-11-11T16:25:56,656 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:47230 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:40903:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47230 dst: /127.0.0.1:40903 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:40903 remote=/127.0.0.1:47230]. Total timeout mills is 60000, 58730 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:25:56,659 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:47286 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:41813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47286 dst: /127.0.0.1:41813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:25:56,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:39052 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741855_1031] {}] datanode.DataXceiver(331): 127.0.0.1:32929:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:39052 dst: /127.0.0.1:32929 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:25:56,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741855_1032 (size=263633) 2024-11-11T16:25:56,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741855_1032 (size=263633) 2024-11-11T16:25:58,289 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T16:25:58,362 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T16:25:59,399 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T16:25:59,399 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T16:25:59,401 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T16:25:59,401 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T16:25:59,401 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T16:25:59,401 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-11T16:25:59,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL 2024-11-11T16:25:59,402 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenIntoWAL Metrics about Tables on a single HBase RegionServer 2024-11-11T16:25:59,510 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983 after 4011ms 2024-11-11T16:25:59,515 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983: 
isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:25:59,517 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983 took 4019ms 2024-11-11T16:25:59,523 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1731342354983.temp 2024-11-11T16:25:59,526 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000000001-wal.1731342354983.temp 2024-11-11T16:25:59,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741829_1005 (size=34) 2024-11-11T16:25:59,712 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983; continuing. 2024-11-11T16:25:59,712 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983 so closing down 2024-11-11T16:25:59,712 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:25:59,713 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:25:59,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741856_1033 (size=263641) 2024-11-11T16:25:59,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741856_1033 (size=263641) 2024-11-11T16:25:59,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741856_1033 (size=263641) 2024-11-11T16:25:59,744 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000000001-wal.1731342354983.temp (wrote 3002 edits, skipped 0 edits in 85 ms) 2024-11-11T16:25:59,752 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000000001-wal.1731342354983.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000003002 2024-11-11T16:25:59,753 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3002 edits across 1 Regions in 236 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983, size=0, length=0, corrupted=false, cancelled=false 2024-11-11T16:25:59,753 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983, journal: Splitting hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983, size=0 
(0bytes) at 1731342355499Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000000001-wal.1731342354983.temp at 1731342359527 (+4028 ms)Split 1024 edits, skipped 0 edits. at 1731342359609 (+82 ms)Split 2048 edits, skipped 0 edits. at 1731342359652 (+43 ms)Finishing writing output for hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983 so closing down at 1731342359712 (+60 ms)3 split writer threads finished at 1731342359713 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000000001-wal.1731342354983.temp (wrote 3002 edits, skipped 0 edits in 85 ms) at 1731342359745 (+32 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000000001-wal.1731342354983.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000003002 at 1731342359753 (+8 ms)Processed 3002 edits across 1 Regions in 236 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983, size=0, length=0, corrupted=false, cancelled=false at 1731342359753 2024-11-11T16:25:59,759 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983 to hdfs://localhost:39605/hbase/oldWALs/wal.1731342354983 2024-11-11T16:25:59,761 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000003002 2024-11-11T16:25:59,761 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:25:59,768 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:25:59,790 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342359769, exclude list is [], retry=0 2024-11-11T16:25:59,794 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:25:59,795 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:25:59,795 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:25:59,813 INFO 
[Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342359769 2024-11-11T16:25:59,817 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:25:59,817 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:25:59,827 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:59,829 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3753c95cc70b081016fdbd80fb1b3344 columnFamilyName a 2024-11-11T16:25:59,829 DEBUG [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:59,832 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(327): Store=3753c95cc70b081016fdbd80fb1b3344/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:59,832 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:59,834 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3753c95cc70b081016fdbd80fb1b3344 columnFamilyName b 2024-11-11T16:25:59,834 DEBUG 
[StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:59,834 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(327): Store=3753c95cc70b081016fdbd80fb1b3344/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:59,835 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:59,836 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3753c95cc70b081016fdbd80fb1b3344 columnFamilyName c 2024-11-11T16:25:59,836 DEBUG [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:25:59,837 INFO [StoreOpener-3753c95cc70b081016fdbd80fb1b3344-1 {}] regionserver.HStore(327): Store=3753c95cc70b081016fdbd80fb1b3344/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:25:59,837 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:59,838 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:59,841 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:25:59,845 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000003002 2024-11-11T16:25:59,859 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000003002: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:25:59,922 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-11T16:26:00,444 INFO [Time-limited test {}] 
regionserver.HRegion(2902): Flushing 3753c95cc70b081016fdbd80fb1b3344 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-11-11T16:26:00,501 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/a/bb52d3e6ecd54512b6cae4b040662c55 is 62, key is testReplayEditsWrittenIntoWAL/a:100/1731342355041/Put/seqid=0 2024-11-11T16:26:00,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741858_1035 (size=50463) 2024-11-11T16:26:00,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741858_1035 (size=50463) 2024-11-11T16:26:00,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741858_1035 (size=50463) 2024-11-11T16:26:00,519 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=754 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/a/bb52d3e6ecd54512b6cae4b040662c55 2024-11-11T16:26:00,528 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/a/bb52d3e6ecd54512b6cae4b040662c55 as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/a/bb52d3e6ecd54512b6cae4b040662c55 2024-11-11T16:26:00,537 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/a/bb52d3e6ecd54512b6cae4b040662c55, entries=754, sequenceid=754, filesize=49.3 K 2024-11-11T16:26:00,538 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.59 KB/101984, currentSize=0 B/0 for 3753c95cc70b081016fdbd80fb1b3344 in 93ms, sequenceid=754, compaction requested=false; wal=null 2024-11-11T16:26:00,561 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-11T16:26:00,561 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3753c95cc70b081016fdbd80fb1b3344 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-11-11T16:26:00,571 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/a/fd1e012ef9d94e9ca0b39c5db1512eb1 is 62, key is testReplayEditsWrittenIntoWAL/a:754/1731342355089/Put/seqid=0 2024-11-11T16:26:00,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741859_1036 (size=20072) 2024-11-11T16:26:00,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741859_1036 (size=20072) 2024-11-11T16:26:00,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741859_1036 (size=20072) 2024-11-11T16:26:00,596 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.93 KB at sequenceid=1508 
(bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/a/fd1e012ef9d94e9ca0b39c5db1512eb1 2024-11-11T16:26:00,636 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/b/281a414f2cad46bd862f09842ee29870 is 62, key is testReplayEditsWrittenIntoWAL/b:100/1731342355123/Put/seqid=0 2024-11-11T16:26:00,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741860_1037 (size=35835) 2024-11-11T16:26:00,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741860_1037 (size=35835) 2024-11-11T16:26:00,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741860_1037 (size=35835) 2024-11-11T16:26:00,647 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=28.56 KB at sequenceid=1508 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/b/281a414f2cad46bd862f09842ee29870 2024-11-11T16:26:00,657 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/a/fd1e012ef9d94e9ca0b39c5db1512eb1 as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/a/fd1e012ef9d94e9ca0b39c5db1512eb1 2024-11-11T16:26:00,666 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/a/fd1e012ef9d94e9ca0b39c5db1512eb1, entries=246, sequenceid=1508, filesize=19.6 K 2024-11-11T16:26:00,673 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/b/281a414f2cad46bd862f09842ee29870 as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/b/281a414f2cad46bd862f09842ee29870 2024-11-11T16:26:00,682 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/b/281a414f2cad46bd862f09842ee29870, entries=508, sequenceid=1508, filesize=35.0 K 2024-11-11T16:26:00,682 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 3753c95cc70b081016fdbd80fb1b3344 in 121ms, sequenceid=1508, compaction requested=false; wal=null 2024-11-11T16:26:00,695 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-11T16:26:00,696 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3753c95cc70b081016fdbd80fb1b3344 3/3 column families, dataSize=42.49 KB heapSize=100.11 KB 2024-11-11T16:26:00,705 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/b/08ec7c4641e3490799b413d62980c4d9 is 62, key is 
testReplayEditsWrittenIntoWAL/b:508/1731342355147/Put/seqid=0 2024-11-11T16:26:00,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741861_1038 (size=35082) 2024-11-11T16:26:00,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741861_1038 (size=35082) 2024-11-11T16:26:00,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741861_1038 (size=35082) 2024-11-11T16:26:00,753 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=27.87 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/b/08ec7c4641e3490799b413d62980c4d9 2024-11-11T16:26:00,790 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/c/255226a0463a46a4b00b0648a1bc2e23 is 62, key is testReplayEditsWrittenIntoWAL/c:100/1731342355210/Put/seqid=0 2024-11-11T16:26:00,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741862_1039 (size=20825) 2024-11-11T16:26:00,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741862_1039 (size=20825) 2024-11-11T16:26:00,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741862_1039 (size=20825) 2024-11-11T16:26:00,847 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.63 KB at sequenceid=2262 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/c/255226a0463a46a4b00b0648a1bc2e23 2024-11-11T16:26:00,859 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/b/08ec7c4641e3490799b413d62980c4d9 as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/b/08ec7c4641e3490799b413d62980c4d9 2024-11-11T16:26:00,871 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/b/08ec7c4641e3490799b413d62980c4d9, entries=492, sequenceid=2262, filesize=34.3 K 2024-11-11T16:26:00,873 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/c/255226a0463a46a4b00b0648a1bc2e23 as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/c/255226a0463a46a4b00b0648a1bc2e23 2024-11-11T16:26:00,882 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/c/255226a0463a46a4b00b0648a1bc2e23, entries=262, sequenceid=2262, filesize=20.3 K 2024-11-11T16:26:00,883 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~42.49 
KB/43512, heapSize ~99.83 KB/102224, currentSize=0 B/0 for 3753c95cc70b081016fdbd80fb1b3344 in 187ms, sequenceid=2262, compaction requested=false; wal=null 2024-11-11T16:26:00,900 WARN [Time-limited test {}] regionserver.HRegion(5722): No family for cell testReplayEditsWrittenIntoWAL/another family:testReplayEditsWrittenIntoWAL/1731342355371/Put/vlen=29/seqid=0 in region testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344. 2024-11-11T16:26:00,904 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3001, skipped 1, firstSequenceIdInLog=1, maxSequenceIdInLog=3002, path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000003002 2024-11-11T16:26:00,905 INFO [Time-limited test {}] wal.AbstractTestWALReplay$4$1(796): InternalFlushCache Invoked 2024-11-11T16:26:00,906 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3753c95cc70b081016fdbd80fb1b3344 3/3 column families, dataSize=41.85 KB heapSize=98.89 KB 2024-11-11T16:26:00,924 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/c/00f4f82645524a3a846bed0578195ffc is 62, key is testReplayEditsWrittenIntoWAL/c:262/1731342355241/Put/seqid=0 2024-11-11T16:26:00,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741863_1040 (size=50301) 2024-11-11T16:26:00,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741863_1040 (size=50301) 2024-11-11T16:26:00,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741863_1040 (size=50301) 2024-11-11T16:26:00,959 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=41.85 KB at sequenceid=3002 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/c/00f4f82645524a3a846bed0578195ffc 2024-11-11T16:26:00,970 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 00f4f82645524a3a846bed0578195ffc 2024-11-11T16:26:00,971 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/.tmp/c/00f4f82645524a3a846bed0578195ffc as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/c/00f4f82645524a3a846bed0578195ffc 2024-11-11T16:26:00,979 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 00f4f82645524a3a846bed0578195ffc 2024-11-11T16:26:00,980 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/c/00f4f82645524a3a846bed0578195ffc, entries=739, sequenceid=3002, filesize=49.1 K 2024-11-11T16:26:00,980 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~41.85 KB/42854, heapSize ~98.38 KB/100736, currentSize=0 B/0 for 3753c95cc70b081016fdbd80fb1b3344 in 75ms, sequenceid=3002, compaction requested=false; wal=null 
2024-11-11T16:26:00,981 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/0000000000000003002 2024-11-11T16:26:00,983 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:26:00,983 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:26:00,984 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenIntoWAL descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T16:26:00,987 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 3753c95cc70b081016fdbd80fb1b3344 2024-11-11T16:26:00,993 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenIntoWAL/3753c95cc70b081016fdbd80fb1b3344/recovered.edits/3002.seqid, newMaxSeqId=3002, maxSeqId=1 2024-11-11T16:26:00,995 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 3753c95cc70b081016fdbd80fb1b3344; next sequenceid=3003; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=204800, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67595452, jitterRate=0.007250726222991943}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T16:26:00,995 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 3753c95cc70b081016fdbd80fb1b3344: Writing region info on filesystem at 1731342359817Initializing all the Stores at 1731342359824 (+7 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342359825 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342359827 (+2 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342359827Cleaning up temporary data from old regions at 1731342360983 (+1156 ms)Region opened successfully at 1731342360995 (+12 ms) 2024-11-11T16:26:01,064 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 3753c95cc70b081016fdbd80fb1b3344, disabling compactions & flushes 2024-11-11T16:26:01,064 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344. 
2024-11-11T16:26:01,064 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344. 2024-11-11T16:26:01,064 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344. after waiting 0 ms 2024-11-11T16:26:01,064 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344. 2024-11-11T16:26:01,072 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenIntoWAL,,1731342354713.3753c95cc70b081016fdbd80fb1b3344. 2024-11-11T16:26:01,072 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 3753c95cc70b081016fdbd80fb1b3344: Waiting for close lock at 1731342361064Disabling compacts and flushes for region at 1731342361064Disabling writes for close at 1731342361064Writing region close event to WAL at 1731342361072 (+8 ms)Closed at 1731342361072 2024-11-11T16:26:01,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741857_1034 (size=95) 2024-11-11T16:26:01,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741857_1034 (size=95) 2024-11-11T16:26:01,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741857_1034 (size=95) 2024-11-11T16:26:01,082 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:01,082 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731342359769) 2024-11-11T16:26:01,102 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenIntoWAL Thread=390 (was 371) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@7862f5d7[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_940042722_22 at /127.0.0.1:33832 [Waiting for operation #12] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@7eec11bb[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_940042722_22 at /127.0.0.1:43588 [Waiting for operation #27] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:40417 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.replay.wal.secondtime@localhost:39605 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_940042722_22 at /127.0.0.1:37154 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44235 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:39605 from jenkins.replay.wal.secondtime java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-12-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@2dc9d0e1[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_940042722_22 at /127.0.0.1:33802 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:44235 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-12-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40417 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=835 (was 755) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=735 (was 692) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2854 (was 3362) 2024-11-11T16:26:01,121 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=390, OpenFileDescriptor=835, MaxFileDescriptor=1048576, SystemLoadAverage=735, ProcessCount=11, AvailableMemoryMB=2849 2024-11-11T16:26:01,145 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:01,148 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:01,149 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:26:01,152 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-17841738, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/hregion-17841738, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:01,168 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-17841738/hregion-17841738.1731342361153, exclude list is [], retry=0 2024-11-11T16:26:01,172 DEBUG [AsyncFSWAL-14-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:01,174 DEBUG [AsyncFSWAL-14-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:01,177 DEBUG [AsyncFSWAL-14-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:01,189 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-17841738/hregion-17841738.1731342361153 2024-11-11T16:26:01,189 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:26:01,190 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 0ba36768de98d6098b1314deb18fe026, NAME => 'test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='test2727', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => 
'0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39605/hbase 2024-11-11T16:26:01,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741865_1042 (size=43) 2024-11-11T16:26:01,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741865_1042 (size=43) 2024-11-11T16:26:01,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741865_1042 (size=43) 2024-11-11T16:26:01,212 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:01,214 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:01,216 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0ba36768de98d6098b1314deb18fe026 columnFamilyName a 2024-11-11T16:26:01,216 DEBUG [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:01,217 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(327): Store=0ba36768de98d6098b1314deb18fe026/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:01,217 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:01,219 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered 
window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0ba36768de98d6098b1314deb18fe026 columnFamilyName b 2024-11-11T16:26:01,219 DEBUG [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:01,219 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(327): Store=0ba36768de98d6098b1314deb18fe026/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:01,219 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:01,222 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0ba36768de98d6098b1314deb18fe026 columnFamilyName c 2024-11-11T16:26:01,222 DEBUG [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:01,223 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(327): Store=0ba36768de98d6098b1314deb18fe026/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:01,223 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:01,224 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:01,224 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:01,226 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:01,226 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:01,227 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of 
families (42.7 M)) instead. 2024-11-11T16:26:01,228 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:01,231 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:26:01,232 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 0ba36768de98d6098b1314deb18fe026; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71181781, jitterRate=0.060691192746162415}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:01,233 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 0ba36768de98d6098b1314deb18fe026: Writing region info on filesystem at 1731342361212Initializing all the Stores at 1731342361213 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342361213Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342361214 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342361214Cleaning up temporary data from old regions at 1731342361226 (+12 ms)Region opened successfully at 1731342361233 (+7 ms) 2024-11-11T16:26:01,234 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 0ba36768de98d6098b1314deb18fe026, disabling compactions & flushes 2024-11-11T16:26:01,234 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026. 2024-11-11T16:26:01,234 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026. 2024-11-11T16:26:01,234 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026. after waiting 0 ms 2024-11-11T16:26:01,234 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026. 2024-11-11T16:26:01,235 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026. 
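The FlushLargeStoresPolicy line above reports flushSizeLowerBound=44739242 ("42.7 M"). A minimal sketch of the arithmetic behind that number, assuming the default 128 MB memstore flush size divided across the table's three column families (a, b, c); this is an illustration of the logged values, not HBase source:

```java
// Illustrative arithmetic only: reproduces the logged flushSizeLowerBound=44739242,
// assuming the default hbase.hregion.memstore.flush.size of 128 MB and 3 column families.
public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        long memstoreFlushSize = 134_217_728L;                  // 128 MB (assumed default)
        int columnFamilies = 3;                                 // families a, b, c of table test2727
        long lowerBound = memstoreFlushSize / columnFamilies;
        System.out.println(lowerBound);                         // 44739242
        System.out.printf("%.1f M%n", lowerBound / 1048576.0);  // 42.7 M, as printed in the log
    }
}
```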
2024-11-11T16:26:01,235 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 0ba36768de98d6098b1314deb18fe026: Waiting for close lock at 1731342361234Disabling compacts and flushes for region at 1731342361234Disabling writes for close at 1731342361234Writing region close event to WAL at 1731342361235 (+1 ms)Closed at 1731342361235 2024-11-11T16:26:01,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741864_1041 (size=95) 2024-11-11T16:26:01,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741864_1041 (size=95) 2024-11-11T16:26:01,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741864_1041 (size=95) 2024-11-11T16:26:01,241 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/hregion-17841738/hregion-17841738.1731342361153 not finished, retry = 0 2024-11-11T16:26:01,345 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:01,345 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-17841738:(num 1731342361153) 2024-11-11T16:26:01,345 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:01,348 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:01,363 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348, exclude list is [], retry=0 2024-11-11T16:26:01,367 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:01,367 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:01,368 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:01,370 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348 2024-11-11T16:26:01,370 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:26:01,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741866_1043 (size=263359) 2024-11-11T16:26:01,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to 
blk_1073741866_1043 (size=263359) 2024-11-11T16:26:01,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741866_1043 (size=263359) 2024-11-11T16:26:01,577 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348, size=257.2 K (263359bytes) 2024-11-11T16:26:01,577 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348 2024-11-11T16:26:01,578 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348 after 1ms 2024-11-11T16:26:01,582 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:01,584 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348 took 7ms 2024-11-11T16:26:01,591 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000001-wal.1731342361348.temp 2024-11-11T16:26:01,593 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000000001-wal.1731342361348.temp 2024-11-11T16:26:01,661 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348 so closing down 2024-11-11T16:26:01,661 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:01,662 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:01,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741867_1044 (size=263359) 2024-11-11T16:26:01,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741867_1044 (size=263359) 2024-11-11T16:26:01,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741867_1044 (size=263359) 2024-11-11T16:26:02,090 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000000001-wal.1731342361348.temp (wrote 3000 edits, skipped 0 edits in 45 ms) 2024-11-11T16:26:02,092 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000000001-wal.1731342361348.temp to hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003000 2024-11-11T16:26:02,093 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 
Regions in 508 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348, size=257.2 K, length=263359, corrupted=false, cancelled=false 2024-11-11T16:26:02,093 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348, journal: Splitting hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348, size=257.2 K (263359bytes) at 1731342361577Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000000001-wal.1731342361348.temp at 1731342361593 (+16 ms)Split 1024 edits, skipped 0 edits. at 1731342361615 (+22 ms)Split 2048 edits, skipped 0 edits. at 1731342361636 (+21 ms)Finishing writing output for hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348 so closing down at 1731342361661 (+25 ms)3 split writer threads finished at 1731342361662 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000000001-wal.1731342361348.temp (wrote 3000 edits, skipped 0 edits in 45 ms) at 1731342362090 (+428 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000000001-wal.1731342361348.temp to hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003000 at 1731342362092 (+2 ms)Processed 3000 edits across 1 Regions in 508 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348, size=257.2 K, length=263359, corrupted=false, cancelled=false at 1731342362093 (+1 ms) 2024-11-11T16:26:02,095 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342361348 to hdfs://localhost:39605/hbase/oldWALs/wal.1731342361348 2024-11-11T16:26:02,096 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003000 2024-11-11T16:26:02,096 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:02,098 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:02,119 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099, exclude list is [], retry=0 2024-11-11T16:26:02,122 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:02,123 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:02,123 DEBUG 
[TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:02,125 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099 2024-11-11T16:26:02,126 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:34969:34969)] 2024-11-11T16:26:02,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741868_1045 (size=263486) 2024-11-11T16:26:02,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741868_1045 (size=263486) 2024-11-11T16:26:02,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741868_1045 (size=263486) 2024-11-11T16:26:02,379 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099 not finished, retry = 0 2024-11-11T16:26:02,498 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099, size=257.3 K (263486bytes) 2024-11-11T16:26:02,498 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099 2024-11-11T16:26:02,499 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099 after 1ms 2024-11-11T16:26:02,502 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:02,504 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099 took 6ms 2024-11-11T16:26:02,508 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000003001-wal.1731342362099.temp 2024-11-11T16:26:02,511 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003001-wal.1731342362099.temp 2024-11-11T16:26:02,567 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099 so closing down 2024-11-11T16:26:02,567 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:02,569 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:02,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741869_1046 (size=263486) 
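The compression context initialized for this split reports hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ, which is what TestAsyncWALReplayValueCompression exercises. A minimal sketch of the kind of configuration that produces such WALs; the property names are assumptions inferred from the WAL compression feature and should be checked against the CompressionContext constants of the HBase version in use:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: enables WAL compression with GZ-compressed values so that split/replay
// logs report hasValueCompression=true, valueCompressionType=GZ. Property names are
// assumptions, not verified against this HBase build.
public class WalValueCompressionConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);         // assumed key: WAL key/tag compression
        conf.setBoolean("hbase.regionserver.wal.value.compression.enabled", true); // assumed key: WAL value compression
        conf.set("hbase.regionserver.wal.value.compression.type", "GZ");           // assumed key: matches valueCompressionType=GZ
        System.out.println(conf.get("hbase.regionserver.wal.value.compression.type"));
    }
}
```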
2024-11-11T16:26:02,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741869_1046 (size=263486) 2024-11-11T16:26:02,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741869_1046 (size=263486) 2024-11-11T16:26:02,574 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003001-wal.1731342362099.temp (wrote 3000 edits, skipped 0 edits in 42 ms) 2024-11-11T16:26:02,576 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003001-wal.1731342362099.temp to hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000006000 2024-11-11T16:26:02,577 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 3000 edits across 1 Regions in 73 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099, size=257.3 K, length=263486, corrupted=false, cancelled=false 2024-11-11T16:26:02,577 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099, journal: Splitting hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099, size=257.3 K (263486bytes) at 1731342362498Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003001-wal.1731342362099.temp at 1731342362511 (+13 ms)Split 1024 edits, skipped 0 edits. at 1731342362527 (+16 ms)Split 2048 edits, skipped 0 edits. 
at 1731342362549 (+22 ms)Finishing writing output for hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099 so closing down at 1731342362567 (+18 ms)3 split writer threads finished at 1731342362569 (+2 ms)Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003001-wal.1731342362099.temp (wrote 3000 edits, skipped 0 edits in 42 ms) at 1731342362574 (+5 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003001-wal.1731342362099.temp to hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000006000 at 1731342362577 (+3 ms)Processed 3000 edits across 1 Regions in 73 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099, size=257.3 K, length=263486, corrupted=false, cancelled=false at 1731342362577 2024-11-11T16:26:02,580 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362099 to hdfs://localhost:39605/hbase/oldWALs/wal.1731342362099 2024-11-11T16:26:02,581 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000006000 2024-11-11T16:26:02,581 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:02,584 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/test2727-manual,16010,1731342361144, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:02,605 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362585, exclude list is [], retry=0 2024-11-11T16:26:02,609 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:02,609 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:02,610 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:02,624 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362585 2024-11-11T16:26:02,625 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969)] 2024-11-11T16:26:02,625 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 0ba36768de98d6098b1314deb18fe026, NAME => 
'test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:02,625 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:02,626 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:02,626 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:02,637 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:02,642 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0ba36768de98d6098b1314deb18fe026 columnFamilyName a 2024-11-11T16:26:02,642 DEBUG [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:02,643 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(327): Store=0ba36768de98d6098b1314deb18fe026/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:02,643 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:02,644 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0ba36768de98d6098b1314deb18fe026 columnFamilyName b 2024-11-11T16:26:02,645 DEBUG 
[StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:02,645 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(327): Store=0ba36768de98d6098b1314deb18fe026/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:02,645 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:02,647 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0ba36768de98d6098b1314deb18fe026 columnFamilyName c 2024-11-11T16:26:02,647 DEBUG [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:02,647 INFO [StoreOpener-0ba36768de98d6098b1314deb18fe026-1 {}] regionserver.HStore(327): Store=0ba36768de98d6098b1314deb18fe026/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:02,647 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:02,648 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:02,652 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 2 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:02,653 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003000 2024-11-11T16:26:02,656 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:02,729 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=1, maxSequenceIdInLog=3000, 
path=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003000 2024-11-11T16:26:02,737 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000006000 2024-11-11T16:26:02,742 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000006000: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:02,816 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 3000, skipped 0, firstSequenceIdInLog=3001, maxSequenceIdInLog=6000, path=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000006000 2024-11-11T16:26:02,817 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 0ba36768de98d6098b1314deb18fe026 3/3 column families, dataSize=215.51 KB heapSize=657 KB 2024-11-11T16:26:02,851 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/.tmp/a/bfe8498b07434ff7a58bdf23eff1e650 is 41, key is test2727/a:100/1731342362131/Put/seqid=0 2024-11-11T16:26:02,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741871_1048 (size=84227) 2024-11-11T16:26:02,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741871_1048 (size=84227) 2024-11-11T16:26:02,890 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/.tmp/a/bfe8498b07434ff7a58bdf23eff1e650 2024-11-11T16:26:02,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741871_1048 (size=84227) 2024-11-11T16:26:02,933 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/.tmp/b/941bd00c984f4585ae8fad940149c670 is 41, key is test2727/b:100/1731342362264/Put/seqid=0 2024-11-11T16:26:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741872_1049 (size=84609) 2024-11-11T16:26:02,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741872_1049 (size=84609) 2024-11-11T16:26:02,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741872_1049 (size=84609) 2024-11-11T16:26:02,973 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/.tmp/b/941bd00c984f4585ae8fad940149c670 2024-11-11T16:26:03,093 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/.tmp/c/5df7ff1a1972476d8f11e7163237ef33 is 41, key is test2727/c:100/1731342362315/Put/seqid=0 2024-11-11T16:26:03,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741873_1050 (size=84609) 2024-11-11T16:26:03,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741873_1050 (size=84609) 2024-11-11T16:26:03,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741873_1050 (size=84609) 2024-11-11T16:26:03,141 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=71.84 KB at sequenceid=6000 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/.tmp/c/5df7ff1a1972476d8f11e7163237ef33 2024-11-11T16:26:03,154 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/.tmp/a/bfe8498b07434ff7a58bdf23eff1e650 as hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/a/bfe8498b07434ff7a58bdf23eff1e650 2024-11-11T16:26:03,165 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/a/bfe8498b07434ff7a58bdf23eff1e650, entries=2000, sequenceid=6000, filesize=82.3 K 2024-11-11T16:26:03,166 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/.tmp/b/941bd00c984f4585ae8fad940149c670 as hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/b/941bd00c984f4585ae8fad940149c670 2024-11-11T16:26:03,175 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/b/941bd00c984f4585ae8fad940149c670, entries=2000, sequenceid=6000, filesize=82.6 K 2024-11-11T16:26:03,177 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/.tmp/c/5df7ff1a1972476d8f11e7163237ef33 as hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/c/5df7ff1a1972476d8f11e7163237ef33 2024-11-11T16:26:03,185 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/c/5df7ff1a1972476d8f11e7163237ef33, entries=2000, sequenceid=6000, filesize=82.6 K 2024-11-11T16:26:03,185 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 0ba36768de98d6098b1314deb18fe026 in 369ms, sequenceid=6000, compaction requested=false; wal=null 2024-11-11T16:26:03,186 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000003000 2024-11-11T16:26:03,187 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits 
file=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/0000000000000006000 2024-11-11T16:26:03,189 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:03,189 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:03,190 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table test2727 descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:03,193 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 0ba36768de98d6098b1314deb18fe026 2024-11-11T16:26:03,198 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/test2727/0ba36768de98d6098b1314deb18fe026/recovered.edits/6000.seqid, newMaxSeqId=6000, maxSeqId=1 2024-11-11T16:26:03,199 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 0ba36768de98d6098b1314deb18fe026; next sequenceid=6001; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61411378, jitterRate=-0.08489915728569031}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:03,201 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 0ba36768de98d6098b1314deb18fe026: Writing region info on filesystem at 1731342362626Initializing all the Stores at 1731342362627 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342362628 (+1 ms)Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342362636 (+8 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342362636Obtaining lock to block concurrent updates at 1731342362817 (+181 ms)Preparing flush snapshotting stores in 0ba36768de98d6098b1314deb18fe026 at 1731342362817Finished memstore snapshotting test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026., syncing WAL and waiting on mvcc, flushsize=dataSize=220680, getHeapSize=672720, getOffHeapSize=0, getCellsCount=6000 at 1731342362817Flushing stores of test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026. 
at 1731342362817Flushing 0ba36768de98d6098b1314deb18fe026/a: creating writer at 1731342362817Flushing 0ba36768de98d6098b1314deb18fe026/a: appending metadata at 1731342362850 (+33 ms)Flushing 0ba36768de98d6098b1314deb18fe026/a: closing flushed file at 1731342362850Flushing 0ba36768de98d6098b1314deb18fe026/b: creating writer at 1731342362905 (+55 ms)Flushing 0ba36768de98d6098b1314deb18fe026/b: appending metadata at 1731342362931 (+26 ms)Flushing 0ba36768de98d6098b1314deb18fe026/b: closing flushed file at 1731342362931Flushing 0ba36768de98d6098b1314deb18fe026/c: creating writer at 1731342362997 (+66 ms)Flushing 0ba36768de98d6098b1314deb18fe026/c: appending metadata at 1731342363091 (+94 ms)Flushing 0ba36768de98d6098b1314deb18fe026/c: closing flushed file at 1731342363092 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8dd134c: reopening flushed file at 1731342363153 (+61 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f575364: reopening flushed file at 1731342363165 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f132c69: reopening flushed file at 1731342363175 (+10 ms)Finished flush of dataSize ~215.51 KB/220680, heapSize ~656.95 KB/672720, currentSize=0 B/0 for 0ba36768de98d6098b1314deb18fe026 in 369ms, sequenceid=6000, compaction requested=false; wal=null at 1731342363185 (+10 ms)Cleaning up temporary data from old regions at 1731342363189 (+4 ms)Region opened successfully at 1731342363201 (+12 ms) 2024-11-11T16:26:03,202 DEBUG [Time-limited test {}] wal.AbstractTestWALReplay(320): region.getOpenSeqNum(): 6001, wal3.id: 0 2024-11-11T16:26:03,203 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 0ba36768de98d6098b1314deb18fe026, disabling compactions & flushes 2024-11-11T16:26:03,203 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026. 2024-11-11T16:26:03,203 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026. 2024-11-11T16:26:03,203 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026. after waiting 0 ms 2024-11-11T16:26:03,203 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026. 2024-11-11T16:26:03,220 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed test2727,,1731342361146.0ba36768de98d6098b1314deb18fe026. 
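A quick consistency check of the replay-and-flush numbers above: two split WALs of 3000 edits each give the 6000 cells in the flush snapshot, 2000 entries per flushed store file, and ~215.51 KB of data. Illustrative arithmetic only:

```java
// Back-of-the-envelope check of the logged replay/flush totals (illustration, not HBase code).
public class ReplayFlushCheck {
    public static void main(String[] args) {
        int editsPerWal = 3000;                        // each split reported "wrote 3000 edits"
        int wals = 2;                                  // wal.1731342361348 and wal.1731342362099
        int families = 3;                              // column families a, b, c
        int totalCells = editsPerWal * wals;           // 6000 -> getCellsCount=6000
        int entriesPerFamily = totalCells / families;  // 2000 -> "entries=2000" per flushed HFile
        long dataSizeBytes = 220_680L;                 // from "dataSize ~215.51 KB/220680"
        System.out.printf("%d cells, %d per family, %.2f KB%n",
            totalCells, entriesPerFamily, dataSizeBytes / 1024.0); // 6000 cells, 2000 per family, 215.51 KB
    }
}
```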
2024-11-11T16:26:03,220 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 0ba36768de98d6098b1314deb18fe026: Waiting for close lock at 1731342363202Disabling compacts and flushes for region at 1731342363202Disabling writes for close at 1731342363203 (+1 ms)Writing region close event to WAL at 1731342363220 (+17 ms)Closed at 1731342363220 2024-11-11T16:26:03,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741870_1047 (size=95) 2024-11-11T16:26:03,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741870_1047 (size=95) 2024-11-11T16:26:03,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741870_1047 (size=95) 2024-11-11T16:26:03,232 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/test2727-manual,16010,1731342361144/wal.1731342362585 not finished, retry = 0 2024-11-11T16:26:03,336 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:03,336 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731342362585) 2024-11-11T16:26:03,359 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#test2727 Thread=387 (was 390), OpenFileDescriptor=891 (was 835) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=735 (was 735), ProcessCount=11 (was 11), AvailableMemoryMB=2556 (was 2849) 2024-11-11T16:26:03,374 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=387, OpenFileDescriptor=891, MaxFileDescriptor=1048576, SystemLoadAverage=735, ProcessCount=11, AvailableMemoryMB=2555 2024-11-11T16:26:03,396 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:03,404 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:03,404 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor wal.1731342363404 2024-11-11T16:26:03,417 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404 2024-11-11T16:26:03,420 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new MockWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:26:03,437 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 63d26053c7b2a3e286809ad82dbaef05, NAME => 'testSequentialEditLogSeqNum,,1731342363397.63d26053c7b2a3e286809ad82dbaef05.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:03,438 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testSequentialEditLogSeqNum,,1731342363397.63d26053c7b2a3e286809ad82dbaef05.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 
2024-11-11T16:26:03,438 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 63d26053c7b2a3e286809ad82dbaef05 2024-11-11T16:26:03,438 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 63d26053c7b2a3e286809ad82dbaef05 2024-11-11T16:26:03,439 WARN [Time-limited test {}] regionserver.HRegionFileSystem(836): hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05 doesn't exist for region: 63d26053c7b2a3e286809ad82dbaef05 on table testSequentialEditLogSeqNum 2024-11-11T16:26:03,440 WARN [Time-limited test {}] regionserver.HRegionFileSystem(854): .regioninfo file not found for region: 63d26053c7b2a3e286809ad82dbaef05 on table testSequentialEditLogSeqNum 2024-11-11T16:26:03,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741875_1052 (size=62) 2024-11-11T16:26:03,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741875_1052 (size=62) 2024-11-11T16:26:03,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741875_1052 (size=62) 2024-11-11T16:26:03,454 INFO [StoreOpener-63d26053c7b2a3e286809ad82dbaef05-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 63d26053c7b2a3e286809ad82dbaef05 2024-11-11T16:26:03,456 INFO [StoreOpener-63d26053c7b2a3e286809ad82dbaef05-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 63d26053c7b2a3e286809ad82dbaef05 columnFamilyName a 2024-11-11T16:26:03,456 DEBUG [StoreOpener-63d26053c7b2a3e286809ad82dbaef05-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:03,457 INFO [StoreOpener-63d26053c7b2a3e286809ad82dbaef05-1 {}] regionserver.HStore(327): Store=63d26053c7b2a3e286809ad82dbaef05/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:03,457 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 63d26053c7b2a3e286809ad82dbaef05 2024-11-11T16:26:03,458 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05 2024-11-11T16:26:03,458 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05 
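A side note on the split-policy figures logged when the regions open (desiredMaxFileSize=71181781 with jitterRate=0.0607, and desiredMaxFileSize=61411378 with jitterRate=-0.0849): both pairs are consistent with a 64 MiB base file size scaled by (1 + jitterRate). The 64 MiB base is inferred from the numbers themselves rather than taken from the test's configuration, so treat it as an assumption. A minimal check:

```java
// Illustrative check: back-derives the base max file size implied by the logged
// desiredMaxFileSize/jitterRate pairs (assumed base: 64 MiB = 67108864 bytes).
public class SplitPolicyJitterCheck {
    public static void main(String[] args) {
        long assumedBase = 64L * 1024 * 1024;
        double[] jitterRates = {0.060691192746162415, -0.08489915728569031};
        for (double jitter : jitterRates) {
            System.out.println(Math.round(assumedBase * (1 + jitter))); // 71181781, then 61411378
        }
    }
}
```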
2024-11-11T16:26:03,461 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 63d26053c7b2a3e286809ad82dbaef05 2024-11-11T16:26:03,461 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 63d26053c7b2a3e286809ad82dbaef05 2024-11-11T16:26:03,464 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 63d26053c7b2a3e286809ad82dbaef05 2024-11-11T16:26:03,469 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:26:03,470 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 63d26053c7b2a3e286809ad82dbaef05; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66527440, jitterRate=-0.00866389274597168}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T16:26:03,471 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 63d26053c7b2a3e286809ad82dbaef05: Writing region info on filesystem at 1731342363438Initializing all the Stores at 1731342363453 (+15 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342363453Cleaning up temporary data from old regions at 1731342363461 (+8 ms)Region opened successfully at 1731342363471 (+10 ms) 2024-11-11T16:26:03,485 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 63d26053c7b2a3e286809ad82dbaef05 1/1 column families, dataSize=770 B heapSize=1.73 KB 2024-11-11T16:26:03,518 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/.tmp/a/ec1543a73d43499c8a05081b38e38f9e is 81, key is testSequentialEditLogSeqNum/a:x0/1731342363471/Put/seqid=0 2024-11-11T16:26:03,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741876_1053 (size=5833) 2024-11-11T16:26:03,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741876_1053 (size=5833) 2024-11-11T16:26:03,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741876_1053 (size=5833) 2024-11-11T16:26:03,531 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=770 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/.tmp/a/ec1543a73d43499c8a05081b38e38f9e 2024-11-11T16:26:03,540 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/.tmp/a/ec1543a73d43499c8a05081b38e38f9e as hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/a/ec1543a73d43499c8a05081b38e38f9e 2024-11-11T16:26:03,551 INFO [Time-limited test {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/a/ec1543a73d43499c8a05081b38e38f9e, entries=10, sequenceid=13, filesize=5.7 K 2024-11-11T16:26:03,557 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~770 B/770, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 63d26053c7b2a3e286809ad82dbaef05 in 72ms, sequenceid=13, compaction requested=false 2024-11-11T16:26:03,557 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 63d26053c7b2a3e286809ad82dbaef05: 2024-11-11T16:26:03,565 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:26:03,565 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:26:03,566 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:26:03,566 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:26:03,566 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:26:03,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741874_1051 (size=1844) 2024-11-11T16:26:03,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741874_1051 (size=1844) 2024-11-11T16:26:03,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741874_1051 (size=1844) 2024-11-11T16:26:03,593 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404, size=1.8 K (1844bytes) 2024-11-11T16:26:03,594 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404 2024-11-11T16:26:03,594 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404 after 0ms 2024-11-11T16:26:03,598 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:03,598 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404 took 5ms 2024-11-11T16:26:03,611 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404 so closing down 2024-11-11T16:26:03,611 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:03,613 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731342363404.temp 2024-11-11T16:26:03,616 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/recovered.edits/0000000000000000003-wal.1731342363404.temp 
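The flush above ("Flushing 63d26053c7b2a3e286809ad82dbaef05 1/1 column families, dataSize=770 B" through "Finished flush ... in 72ms, sequenceid=13") is the standard memstore flush path: cells are written to a temporary HFile under .tmp and then committed into the family directory. A hedged sketch of driving the same behaviour from the public client API is below; the connection setup and the row and qualifier names are assumptions for illustration and not the test's actual code, which writes to the HRegion directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testSequentialEditLogSeqNum");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          byte[] row = Bytes.toBytes("testSequentialEditLogSeqNum");
          byte[] family = Bytes.toBytes("a");
          // Ten small puts, comparable to the a:x0.. edits visible in the flushed HFile key above.
          for (int i = 0; i < 10; i++) {
            table.put(new Put(row).addColumn(family, Bytes.toBytes("x" + i), Bytes.toBytes(i)));
          }
          // Force the memstore to disk: a new HFile under .tmp, then committed into <region>/a/.
          admin.flush(tn);
        }
      }
    }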
2024-11-11T16:26:03,617 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:03,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741877_1054 (size=1477) 2024-11-11T16:26:03,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741877_1054 (size=1477) 2024-11-11T16:26:03,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741877_1054 (size=1477) 2024-11-11T16:26:03,632 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/recovered.edits/0000000000000000003-wal.1731342363404.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-11-11T16:26:03,634 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/recovered.edits/0000000000000000003-wal.1731342363404.temp to hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/recovered.edits/0000000000000000020 2024-11-11T16:26:03,635 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 17 edits across 1 Regions in 27 ms; skipped=2; WAL=hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404, size=1.8 K, length=1844, corrupted=false, cancelled=false 2024-11-11T16:26:03,635 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404, journal: Splitting hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404, size=1.8 K (1844bytes) at 1731342363594Finishing writing output for hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404 so closing down at 1731342363611 (+17 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/recovered.edits/0000000000000000003-wal.1731342363404.temp at 1731342363616 (+5 ms)3 split writer threads finished at 1731342363617 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/recovered.edits/0000000000000000003-wal.1731342363404.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1731342363632 (+15 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/recovered.edits/0000000000000000003-wal.1731342363404.temp to hdfs://localhost:39605/hbase/data/default/testSequentialEditLogSeqNum/63d26053c7b2a3e286809ad82dbaef05/recovered.edits/0000000000000000020 at 1731342363634 (+2 ms)Processed 17 edits across 1 Regions in 27 ms; skipped=2; WAL=hdfs://localhost:39605/hbase/WALs/testsequentialeditlogseqnum-manual,16010,1731342363395/wal.1731342363404, size=1.8 K, length=1844, corrupted=false, cancelled=false at 1731342363635 (+1 ms) 2024-11-11T16:26:03,652 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
regionserver.wal.TestAsyncWALReplayValueCompression#testSequentialEditLogSeqNum Thread=392 (was 387) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:33984 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:43726 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=927 (was 891) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=735 (was 735), ProcessCount=11 (was 11), AvailableMemoryMB=2549 (was 2555) 2024-11-11T16:26:03,665 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=392, OpenFileDescriptor=927, MaxFileDescriptor=1048576, SystemLoadAverage=735, ProcessCount=11, AvailableMemoryMB=2549 2024-11-11T16:26:03,685 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:03,687 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:03,688 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:26:03,692 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-42586345, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/hregion-42586345, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:03,711 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-42586345/hregion-42586345.1731342363693, exclude list is [], retry=0 2024-11-11T16:26:03,715 DEBUG [AsyncFSWAL-17-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:03,715 DEBUG [AsyncFSWAL-17-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:03,721 DEBUG [AsyncFSWAL-17-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:03,731 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-42586345/hregion-42586345.1731342363693 2024-11-11T16:26:03,733 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:34969:34969)] 2024-11-11T16:26:03,733 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 83e7e092e8dd21d535028e1a6532271e, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testRegionMadeOfBulkLoadedFilesOnly', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS 
=> '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39605/hbase 2024-11-11T16:26:03,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741879_1056 (size=70) 2024-11-11T16:26:03,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741879_1056 (size=70) 2024-11-11T16:26:03,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741879_1056 (size=70) 2024-11-11T16:26:03,757 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:03,759 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,761 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83e7e092e8dd21d535028e1a6532271e columnFamilyName a 2024-11-11T16:26:03,761 DEBUG [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:03,762 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(327): Store=83e7e092e8dd21d535028e1a6532271e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:03,762 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,766 INFO 
[StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83e7e092e8dd21d535028e1a6532271e columnFamilyName b 2024-11-11T16:26:03,766 DEBUG [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:03,766 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(327): Store=83e7e092e8dd21d535028e1a6532271e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:03,767 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,770 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83e7e092e8dd21d535028e1a6532271e columnFamilyName c 2024-11-11T16:26:03,770 DEBUG [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:03,771 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(327): Store=83e7e092e8dd21d535028e1a6532271e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:03,772 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,773 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,773 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,775 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,775 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,775 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:03,779 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,785 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:26:03,786 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 83e7e092e8dd21d535028e1a6532271e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58788485, jitterRate=-0.12398330867290497}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:03,787 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 83e7e092e8dd21d535028e1a6532271e: Writing region info on filesystem at 1731342363757Initializing all the Stores at 1731342363758 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342363758Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342363759 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342363759Cleaning up temporary data from old regions at 1731342363775 (+16 ms)Region opened successfully at 1731342363787 (+12 ms) 2024-11-11T16:26:03,788 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 83e7e092e8dd21d535028e1a6532271e, disabling compactions & flushes 2024-11-11T16:26:03,788 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e. 2024-11-11T16:26:03,788 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e. 
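The FlushLargeStoresPolicy line above derives the per-family flush lower bound from the region memstore flush size divided by the number of column families. With the default 128 MB flush size and the three families a, b and c of testRegionMadeOfBulkLoadedFilesOnly, that works out to 134217728 / 3 = 44739242 bytes ≈ 42.7 MB, which matches both the "(42.7 M)" in the message and the flushSizeLowerBound=44739242 reported when the region is opened.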
2024-11-11T16:26:03,788 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e. after waiting 0 ms 2024-11-11T16:26:03,788 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e. 2024-11-11T16:26:03,788 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e. 2024-11-11T16:26:03,788 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 83e7e092e8dd21d535028e1a6532271e: Waiting for close lock at 1731342363788Disabling compacts and flushes for region at 1731342363788Disabling writes for close at 1731342363788Writing region close event to WAL at 1731342363788Closed at 1731342363788 2024-11-11T16:26:03,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741878_1055 (size=95) 2024-11-11T16:26:03,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741878_1055 (size=95) 2024-11-11T16:26:03,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741878_1055 (size=95) 2024-11-11T16:26:03,808 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:03,808 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-42586345:(num 1731342363693) 2024-11-11T16:26:03,808 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:03,811 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:03,828 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812, exclude list is [], retry=0 2024-11-11T16:26:03,835 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:03,836 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:03,836 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:03,857 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812 2024-11-11T16:26:03,865 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387)] 2024-11-11T16:26:03,865 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 83e7e092e8dd21d535028e1a6532271e, NAME => 'testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:03,865 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:03,865 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,866 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,868 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,869 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83e7e092e8dd21d535028e1a6532271e columnFamilyName a 2024-11-11T16:26:03,869 DEBUG [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:03,870 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(327): Store=83e7e092e8dd21d535028e1a6532271e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:03,870 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,872 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83e7e092e8dd21d535028e1a6532271e columnFamilyName b 2024-11-11T16:26:03,872 DEBUG [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:03,872 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(327): Store=83e7e092e8dd21d535028e1a6532271e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:03,873 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,874 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83e7e092e8dd21d535028e1a6532271e columnFamilyName c 2024-11-11T16:26:03,875 DEBUG [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:03,875 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(327): Store=83e7e092e8dd21d535028e1a6532271e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:03,876 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,876 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,878 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,879 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,879 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,880 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:03,882 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:03,883 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 83e7e092e8dd21d535028e1a6532271e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74039072, jitterRate=0.10326814651489258}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:03,885 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 83e7e092e8dd21d535028e1a6532271e: Writing region info on filesystem at 1731342363866Initializing all the Stores at 1731342363867 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342363867Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342363867Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342363867Cleaning up temporary data from old regions at 1731342363880 (+13 ms)Region opened successfully at 1731342363884 (+4 ms) 2024-11-11T16:26:03,891 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile is 28, key is \x0D/a:a/1731342363889/Put/seqid=0 2024-11-11T16:26:03,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741881_1058 (size=4826) 2024-11-11T16:26:03,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741881_1058 (size=4826) 2024-11-11T16:26:03,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741881_1058 (size=4826) 2024-11-11T16:26:03,945 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:39605/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile for inclusion in 83e7e092e8dd21d535028e1a6532271e/a 2024-11-11T16:26:03,957 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first= last=z 2024-11-11T16:26:03,958 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-11T16:26:03,958 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 83e7e092e8dd21d535028e1a6532271e: 2024-11-11T16:26:03,960 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39605/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile as hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/a/9b9ad1180495467d892285062a0c21f9_SeqId_3_ 2024-11-11T16:26:03,961 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:39605/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 83e7e092e8dd21d535028e1a6532271e/a as hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/a/9b9ad1180495467d892285062a0c21f9_SeqId_3_ - updating store file list. 2024-11-11T16:26:03,969 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 9b9ad1180495467d892285062a0c21f9_SeqId_3_: NONE, but ROW specified in column family configuration 2024-11-11T16:26:03,970 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/a/9b9ad1180495467d892285062a0c21f9_SeqId_3_ into 83e7e092e8dd21d535028e1a6532271e/a 2024-11-11T16:26:03,970 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:39605/hbase/testRegionMadeOfBulkLoadedFilesOnly/hfile into 83e7e092e8dd21d535028e1a6532271e/a (new location: hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/a/9b9ad1180495467d892285062a0c21f9_SeqId_3_) 2024-11-11T16:26:04,020 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812, size=0 (0bytes) 2024-11-11T16:26:04,021 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812 might be still open, length is 0 2024-11-11T16:26:04,021 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812 2024-11-11T16:26:04,021 WARN [IPC Server handler 1 on default port 39605 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812 has not been closed. Lease recovery is in progress. RecoveryId = 1059 for block blk_1073741880_1057 2024-11-11T16:26:04,022 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812 after 1ms 2024-11-11T16:26:05,588 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:34052 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:40903:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34052 dst: /127.0.0.1:40903 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:40903 remote=/127.0.0.1:34052]. Total timeout mills is 60000, 58391 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:05,589 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:37392 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:32929:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37392 dst: /127.0.0.1:32929 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T16:26:05,590 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:43824 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741880_1057] {}] datanode.DataXceiver(331): 127.0.0.1:41813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43824 dst: /127.0.0.1:41813 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:05,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741880_1059 (size=473) 2024-11-11T16:26:05,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741880_1059 (size=473) 2024-11-11T16:26:08,023 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812 after 4001ms 2024-11-11T16:26:08,026 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:08,027 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812 took 4007ms 2024-11-11T16:26:08,029 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812; continuing. 
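The sequence above (size=0 with "might be still open", a failed recoverLease attempt, the DataXceiver WRITE_BLOCK errors as the still-open write pipeline is interrupted, and finally "Recovered lease, attempt=1 ... after 4001ms") is ordinary HDFS lease recovery on a WAL whose writer never closed it. A minimal sketch of the polling pattern, assuming a DistributedFileSystem and an illustrative retry interval, is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      /** Block until the NameNode has closed the file so its length and last block are final. */
      public static void waitForLeaseRecovery(Configuration conf, Path walFile) throws Exception {
        FileSystem fs = walFile.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return; // non-HDFS filesystems have no lease to recover
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        // recoverLease() returns false while recovery is still in progress (attempt=0 above)
        // and true once the file is closed (attempt=1 above, about 4 seconds later in this run).
        while (!dfs.recoverLease(walFile)) {
          Thread.sleep(1000); // illustrative pause; RecoverLeaseFSUtils applies its own pacing and timeouts
        }
      }
    }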
2024-11-11T16:26:08,029 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812 so closing down 2024-11-11T16:26:08,029 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:08,031 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000005-wal.1731342363812.temp 2024-11-11T16:26:08,033 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005-wal.1731342363812.temp 2024-11-11T16:26:08,033 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:08,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741882_1060 (size=259) 2024-11-11T16:26:08,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741882_1060 (size=259) 2024-11-11T16:26:08,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741882_1060 (size=259) 2024-11-11T16:26:08,044 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005-wal.1731342363812.temp (wrote 1 edits, skipped 0 edits in 0 ms) 2024-11-11T16:26:08,046 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005-wal.1731342363812.temp to hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005 2024-11-11T16:26:08,046 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 2 edits across 1 Regions in 19 ms; skipped=1; WAL=hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812, size=0, length=0, corrupted=false, cancelled=false 2024-11-11T16:26:08,046 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812, journal: Splitting hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812, size=0 (0bytes) at 1731342364021Finishing writing output for hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812 so closing down at 1731342368029 (+4008 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005-wal.1731342363812.temp at 1731342368033 (+4 ms)3 split writer threads finished at 1731342368033Closed recovered edits writer 
path=hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005-wal.1731342363812.temp (wrote 1 edits, skipped 0 edits in 0 ms) at 1731342368045 (+12 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005-wal.1731342363812.temp to hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005 at 1731342368046 (+1 ms)Processed 2 edits across 1 Regions in 19 ms; skipped=1; WAL=hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812, size=0, length=0, corrupted=false, cancelled=false at 1731342368046 2024-11-11T16:26:08,048 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812 to hdfs://localhost:39605/hbase/oldWALs/wal.1731342363812 2024-11-11T16:26:08,049 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005 2024-11-11T16:26:08,049 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:08,051 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:08,072 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342368052, exclude list is [], retry=0 2024-11-11T16:26:08,077 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:08,077 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:08,078 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:08,097 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342368052 2024-11-11T16:26:08,098 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:40387:40387)] 2024-11-11T16:26:08,098 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 83e7e092e8dd21d535028e1a6532271e, NAME => 
'testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:08,098 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:08,098 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:08,099 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:08,102 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:08,103 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83e7e092e8dd21d535028e1a6532271e columnFamilyName a 2024-11-11T16:26:08,103 DEBUG [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:08,111 DEBUG [StoreFileOpener-83e7e092e8dd21d535028e1a6532271e-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 9b9ad1180495467d892285062a0c21f9_SeqId_3_: NONE, but ROW specified in column family configuration 2024-11-11T16:26:08,111 DEBUG [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/a/9b9ad1180495467d892285062a0c21f9_SeqId_3_ 2024-11-11T16:26:08,111 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(327): Store=83e7e092e8dd21d535028e1a6532271e/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:08,111 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:08,112 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83e7e092e8dd21d535028e1a6532271e columnFamilyName b 2024-11-11T16:26:08,112 DEBUG [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:08,113 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(327): Store=83e7e092e8dd21d535028e1a6532271e/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:08,113 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:08,114 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83e7e092e8dd21d535028e1a6532271e columnFamilyName c 2024-11-11T16:26:08,114 DEBUG [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:08,114 INFO [StoreOpener-83e7e092e8dd21d535028e1a6532271e-1 {}] regionserver.HStore(327): Store=83e7e092e8dd21d535028e1a6532271e/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:08,115 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:08,115 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:08,117 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:08,118 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from 
hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005 2024-11-11T16:26:08,120 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:08,121 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 1, skipped 0, firstSequenceIdInLog=5, maxSequenceIdInLog=5, path=hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005 2024-11-11T16:26:08,121 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 83e7e092e8dd21d535028e1a6532271e 3/3 column families, dataSize=58 B heapSize=904 B 2024-11-11T16:26:08,137 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/.tmp/a/63becab130b142a889625f56a2595db2 is 62, key is testRegionMadeOfBulkLoadedFilesOnly/a:a/1731342363977/Put/seqid=0 2024-11-11T16:26:08,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741884_1062 (size=5149) 2024-11-11T16:26:08,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741884_1062 (size=5149) 2024-11-11T16:26:08,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741884_1062 (size=5149) 2024-11-11T16:26:08,147 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/.tmp/a/63becab130b142a889625f56a2595db2 2024-11-11T16:26:08,154 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/.tmp/a/63becab130b142a889625f56a2595db2 as hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/a/63becab130b142a889625f56a2595db2 2024-11-11T16:26:08,160 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/a/63becab130b142a889625f56a2595db2, entries=1, sequenceid=5, filesize=5.0 K 2024-11-11T16:26:08,161 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 83e7e092e8dd21d535028e1a6532271e in 39ms, sequenceid=5, compaction requested=false; wal=null 2024-11-11T16:26:08,161 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/0000000000000000005 2024-11-11T16:26:08,163 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:08,163 DEBUG [Time-limited 
test {}] regionserver.HRegion(1060): Cleaning up temporary data for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:08,164 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testRegionMadeOfBulkLoadedFilesOnly descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:08,166 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 83e7e092e8dd21d535028e1a6532271e 2024-11-11T16:26:08,169 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/83e7e092e8dd21d535028e1a6532271e/recovered.edits/5.seqid, newMaxSeqId=5, maxSeqId=1 2024-11-11T16:26:08,170 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 83e7e092e8dd21d535028e1a6532271e; next sequenceid=6; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66639598, jitterRate=-0.006992608308792114}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:08,170 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 83e7e092e8dd21d535028e1a6532271e: Writing region info on filesystem at 1731342368099Initializing all the Stores at 1731342368100 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342368100Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342368102 (+2 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342368102Obtaining lock to block concurrent updates at 1731342368121 (+19 ms)Preparing flush snapshotting stores in 83e7e092e8dd21d535028e1a6532271e at 1731342368121Finished memstore snapshotting testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e., syncing WAL and waiting on mvcc, flushsize=dataSize=58, getHeapSize=856, getOffHeapSize=0, getCellsCount=1 at 1731342368121Flushing stores of testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e. 
at 1731342368121Flushing 83e7e092e8dd21d535028e1a6532271e/a: creating writer at 1731342368121Flushing 83e7e092e8dd21d535028e1a6532271e/a: appending metadata at 1731342368137 (+16 ms)Flushing 83e7e092e8dd21d535028e1a6532271e/a: closing flushed file at 1731342368137Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@80c063b: reopening flushed file at 1731342368153 (+16 ms)Finished flush of dataSize ~58 B/58, heapSize ~376 B/376, currentSize=0 B/0 for 83e7e092e8dd21d535028e1a6532271e in 39ms, sequenceid=5, compaction requested=false; wal=null at 1731342368161 (+8 ms)Cleaning up temporary data from old regions at 1731342368164 (+3 ms)Region opened successfully at 1731342368170 (+6 ms) 2024-11-11T16:26:08,174 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 83e7e092e8dd21d535028e1a6532271e, disabling compactions & flushes 2024-11-11T16:26:08,174 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e. 2024-11-11T16:26:08,174 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e. 2024-11-11T16:26:08,174 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e. after waiting 0 ms 2024-11-11T16:26:08,174 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e. 2024-11-11T16:26:08,175 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testRegionMadeOfBulkLoadedFilesOnly,,1731342363685.83e7e092e8dd21d535028e1a6532271e. 
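[Editorial aside, not part of the test output] The entries above show the split of wal.1731342363812 producing recovered.edits/0000000000000000005 under the region directory, which HRegion then replays, flushes, and deletes. A minimal, hypothetical sketch of how one could inspect such a recovered.edits directory with the plain HDFS FileSystem API (NameNode address and paths copied from the log; this is not code from the test itself):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListRecoveredEdits {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address as reported in the log above; adjust for your cluster.
    conf.set("fs.defaultFS", "hdfs://localhost:39605");
    // Region directory as reported by HRegion(5546) in the log.
    Path editsDir = new Path(
        "/hbase/data/default/testRegionMadeOfBulkLoadedFilesOnly/"
            + "83e7e092e8dd21d535028e1a6532271e/recovered.edits");
    try (FileSystem fs = FileSystem.get(conf)) {
      if (!fs.exists(editsDir)) {
        System.out.println("No recovered.edits directory (nothing left to replay).");
        return;
      }
      for (FileStatus st : fs.listStatus(editsDir)) {
        // Files are named by the first sequence id they contain, e.g. 0000000000000000005.
        System.out.printf("%s (%d bytes)%n", st.getPath().getName(), st.getLen());
      }
    }
  }
}
```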
2024-11-11T16:26:08,175 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 83e7e092e8dd21d535028e1a6532271e: Waiting for close lock at 1731342368174Disabling compacts and flushes for region at 1731342368174Disabling writes for close at 1731342368174Writing region close event to WAL at 1731342368175 (+1 ms)Closed at 1731342368175 2024-11-11T16:26:08,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741883_1061 (size=95) 2024-11-11T16:26:08,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741883_1061 (size=95) 2024-11-11T16:26:08,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741883_1061 (size=95) 2024-11-11T16:26:08,182 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:08,182 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731342368052) 2024-11-11T16:26:08,209 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testRegionMadeOfBulkLoadedFilesOnly Thread=399 (was 392) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1557181773_22 at /127.0.0.1:55044 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:39605 from jenkinstestRegionMadeOfBulkLoadedFilesOnly java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1557181773_22 at /127.0.0.1:54054 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestRegionMadeOfBulkLoadedFilesOnly@localhost:39605 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-17-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1557181773_22 at /127.0.0.1:32898 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=993 (was 927) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=716 (was 735), ProcessCount=11 (was 11), AvailableMemoryMB=2443 (was 2549) 2024-11-11T16:26:08,225 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=399, OpenFileDescriptor=993, MaxFileDescriptor=1048576, SystemLoadAverage=716, ProcessCount=11, AvailableMemoryMB=2442 2024-11-11T16:26:08,249 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:08,291 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T16:26:08,297 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 16b413a53992,40215,1731342348830 2024-11-11T16:26:08,301 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6c2fb858 2024-11-11T16:26:08,302 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T16:26:08,305 INFO [HMaster-EventLoopGroup-2-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45064, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T16:26:08,310 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T16:26:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF 2024-11-11T16:26:08,322 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T16:26:08,325 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testReplayEditsAfterRegionMovedWithMultiCF" procId is: 4 2024-11-11T16:26:08,325 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:08,327 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T16:26:08,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:26:08,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741885_1063 (size=694) 2024-11-11T16:26:08,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741885_1063 (size=694) 2024-11-11T16:26:08,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741885_1063 (size=694) 2024-11-11T16:26:08,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:26:08,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:26:08,751 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9e5cca078c8f306f6c3dea9fad229919, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterRegionMovedWithMultiCF', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553 2024-11-11T16:26:08,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741886_1064 (size=77) 2024-11-11T16:26:08,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741886_1064 (size=77) 2024-11-11T16:26:08,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741886_1064 (size=77) 2024-11-11T16:26:08,760 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:08,761 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1722): Closing 9e5cca078c8f306f6c3dea9fad229919, disabling compactions & flushes 2024-11-11T16:26:08,761 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1755): 
Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:08,761 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:08,761 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. after waiting 0 ms 2024-11-11T16:26:08,761 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:08,761 INFO [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:08,761 DEBUG [RegionOpenAndInit-testReplayEditsAfterRegionMovedWithMultiCF-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9e5cca078c8f306f6c3dea9fad229919: Waiting for close lock at 1731342368760Disabling compacts and flushes for region at 1731342368760Disabling writes for close at 1731342368761 (+1 ms)Writing region close event to WAL at 1731342368761Closed at 1731342368761 2024-11-11T16:26:08,763 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T16:26:08,767 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.","families":{"info":[{"qualifier":"regioninfo","vlen":76,"tag":[],"timestamp":"1731342368763"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731342368763"}]},"ts":"1731342368763"} 2024-11-11T16:26:08,772 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
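[Editorial aside, not part of the test output] The HMaster$4(2454) request and the CreateTableProcedure entries above correspond to a client-side table creation for 'testReplayEditsAfterRegionMovedWithMultiCF' with families cf1 and cf2. A minimal illustrative sketch of an equivalent request using the public Admin API (not the test's own code; assumes a reachable cluster configuration on the classpath):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateMultiCfTable {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    // REGION_REPLICATION => '1' and the two families match the request logged by HMaster above.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        .setRegionReplication(1)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc); // blocks until the CreateTableProcedure above completes
    }
  }
}
```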
2024-11-11T16:26:08,773 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T16:26:08,776 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731342368773"}]},"ts":"1731342368773"} 2024-11-11T16:26:08,780 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLING in hbase:meta 2024-11-11T16:26:08,781 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {16b413a53992=0} racks are {/default-rack=0} 2024-11-11T16:26:08,783 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T16:26:08,783 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T16:26:08,783 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-11T16:26:08,783 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T16:26:08,783 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T16:26:08,783 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-11T16:26:08,783 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T16:26:08,783 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T16:26:08,783 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-11T16:26:08,783 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T16:26:08,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, ASSIGN}] 2024-11-11T16:26:08,787 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, ASSIGN 2024-11-11T16:26:08,789 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, ASSIGN; state=OFFLINE, location=16b413a53992,43811,1731342350126; forceNewPlan=false, retain=false 2024-11-11T16:26:08,942 INFO [16b413a53992:40215 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
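[Editorial aside, not part of the test output] After CREATE_TABLE_ASSIGN_REGIONS, the balancer and TransitRegionStateProcedure entries above place the table's single region on 16b413a53992,43811,1731342350126. A hedged sketch of checking that placement from a client via RegionLocator (illustrative only):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionAssignment {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(name)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Prints the encoded region name and the hosting region server, e.g.
        // 9e5cca078c8f306f6c3dea9fad229919 -> 16b413a53992,43811,1731342350126
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```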
2024-11-11T16:26:08,943 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=OPENING, regionLocation=16b413a53992,43811,1731342350126 2024-11-11T16:26:08,950 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, ASSIGN because future has completed 2024-11-11T16:26:08,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43811,1731342350126}] 2024-11-11T16:26:08,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:26:09,113 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:09,113 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9e5cca078c8f306f6c3dea9fad229919, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:09,114 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,114 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:09,114 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,114 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,116 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,117 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf1 2024-11-11T16:26:09,117 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:09,118 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:09,118 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,120 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf2 2024-11-11T16:26:09,120 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:09,121 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:09,121 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,122 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,122 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,123 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping 
wal replay for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,123 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,124 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-11T16:26:09,125 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,130 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:26:09,131 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9e5cca078c8f306f6c3dea9fad229919; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58818478, jitterRate=-0.1235363781452179}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-11T16:26:09,131 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,131 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9e5cca078c8f306f6c3dea9fad229919: Running coprocessor pre-open hook at 1731342369114Writing region info on filesystem at 1731342369114Initializing all the Stores at 1731342369115 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342369115Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342369116 (+1 ms)Cleaning up temporary data from old regions at 1731342369123 (+7 ms)Running coprocessor post-open hooks at 1731342369131 (+8 ms)Region opened successfully at 1731342369131 2024-11-11T16:26:09,133 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., pid=6, masterSystemTime=1731342369105 2024-11-11T16:26:09,136 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post 
open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:09,137 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:09,138 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=OPEN, openSeqNum=2, regionLocation=16b413a53992,43811,1731342350126 2024-11-11T16:26:09,141 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43811,1731342350126 because future has completed 2024-11-11T16:26:09,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T16:26:09,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43811,1731342350126 in 191 msec 2024-11-11T16:26:09,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T16:26:09,167 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, ASSIGN in 377 msec 2024-11-11T16:26:09,168 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T16:26:09,169 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testReplayEditsAfterRegionMovedWithMultiCF","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731342369169"}]},"ts":"1731342369169"} 2024-11-11T16:26:09,173 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testReplayEditsAfterRegionMovedWithMultiCF, state=ENABLED in hbase:meta 2024-11-11T16:26:09,175 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T16:26:09,178 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=testReplayEditsAfterRegionMovedWithMultiCF in 863 msec 2024-11-11T16:26:09,398 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF 2024-11-11T16:26:09,398 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterRegionMovedWithMultiCF Metrics about Tables on a single HBase RegionServer 2024-11-11T16:26:09,400 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly 2024-11-11T16:26:09,400 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testRegionMadeOfBulkLoadedFilesOnly Metrics about Tables on a single HBase RegionServer 2024-11-11T16:26:09,401 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum 2024-11-11T16:26:09,401 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testSequentialEditLogSeqNum Metrics about Tables on a single HBase RegionServer 2024-11-11T16:26:09,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:26:09,467 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testReplayEditsAfterRegionMovedWithMultiCF completed 2024-11-11T16:26:09,467 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testReplayEditsAfterRegionMovedWithMultiCF get assigned. Timeout = 60000ms 2024-11-11T16:26:09,469 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T16:26:09,476 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned to meta. Checking AM states. 2024-11-11T16:26:09,477 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T16:26:09,478 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testReplayEditsAfterRegionMovedWithMultiCF assigned. 
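[Editorial aside, not part of the test output] Once all regions are assigned, the next entry (AsyncNonMetaRegionLocator fetching the location of row 'r1') is the client resolving a region before accessing that row. A hedged, purely illustrative sketch of such a write; the qualifiers and values below are invented for the example and do not come from the test:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutRowR1 {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(name)) {
      // Row key 'r1' matches the row whose location is fetched in the log;
      // qualifier and values here are illustrative placeholders.
      Put put = new Put(Bytes.toBytes("r1"));
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
      put.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q"), Bytes.toBytes("v2"));
      table.put(put); // the first access triggers the meta lookup seen in the log
    }
  }
}
```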
2024-11-11T16:26:09,494 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=2] 2024-11-11T16:26:09,516 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=9e5cca078c8f306f6c3dea9fad229919, source=16b413a53992,43811,1731342350126, destination=16b413a53992,43519,1731342349897, warming up region on 16b413a53992,43519,1731342349897 2024-11-11T16:26:09,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T16:26:09,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=9e5cca078c8f306f6c3dea9fad229919, source=16b413a53992,43811,1731342350126, destination=16b413a53992,43519,1731342349897, running balancer 2024-11-11T16:26:09,521 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36369, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T16:26:09,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE 2024-11-11T16:26:09,522 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE 2024-11-11T16:26:09,525 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=CLOSING, regionLocation=16b413a53992,43811,1731342350126 2024-11-11T16:26:09,527 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43519 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
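[Editorial aside, not part of the test output] The HMaster(2410)/(2414) entries above record a client-initiated move of region 9e5cca078c8f306f6c3dea9fad229919 from 16b413a53992,43811,1731342350126 to 16b413a53992,43519,1731342349897. A hedged sketch of issuing such a move through the Admin API, assuming the Admin#move(byte[], ServerName) overload available in recent HBase releases (encoded region name and destination copied from the log):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MoveRegion {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Encoded region name and destination server taken from the log above.
      byte[] encodedRegionName = Bytes.toBytes("9e5cca078c8f306f6c3dea9fad229919");
      ServerName destination = ServerName.valueOf("16b413a53992,43519,1731342349897");
      // On the master this kicks off the REOPEN/MOVE TransitRegionStateProcedure
      // (warmup on the destination, then close on the source) seen in the entries below.
      admin.move(encodedRegionName, destination);
    }
  }
}
```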
2024-11-11T16:26:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43519 {}] regionserver.HRegion(7855): Warmup {ENCODED => 9e5cca078c8f306f6c3dea9fad229919, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43519 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:09,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE because future has completed 2024-11-11T16:26:09,529 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-11T16:26:09,530 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43811,1731342350126}] 2024-11-11T16:26:09,532 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,533 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf1 2024-11-11T16:26:09,533 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:09,534 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:09,534 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,535 INFO 
[StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf2 2024-11-11T16:26:09,535 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:09,536 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:09,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43519 {}] regionserver.HRegion(1722): Closing 9e5cca078c8f306f6c3dea9fad229919, disabling compactions & flushes 2024-11-11T16:26:09,536 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43519 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:09,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43519 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:09,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43519 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. after waiting 0 ms 2024-11-11T16:26:09,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43519 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:09,537 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43519 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
2024-11-11T16:26:09,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43519 {}] regionserver.HRegion(1676): Region close journal for 9e5cca078c8f306f6c3dea9fad229919: Waiting for close lock at 1731342369536Disabling compacts and flushes for region at 1731342369536Disabling writes for close at 1731342369536Writing region close event to WAL at 1731342369537 (+1 ms)Closed at 1731342369537 2024-11-11T16:26:09,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] procedure.ProcedureSyncWait(219): waitFor pid=7 2024-11-11T16:26:09,689 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(122): Close 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,689 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-11T16:26:09,690 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1722): Closing 9e5cca078c8f306f6c3dea9fad229919, disabling compactions & flushes 2024-11-11T16:26:09,690 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:09,690 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:09,690 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. after waiting 0 ms 2024-11-11T16:26:09,690 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
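The records above show the master accepting a client move request for region 9e5cca078c8f306f6c3dea9fad229919, storing TransitRegionStateProcedure pid=7 (REOPEN/MOVE), warming the region up on the destination server (43519) and starting to close it on the source (43811); the flush of the source memstore follows in the records below. As a hedged illustration only, the sketch below shows how such a move can be requested through the public HBase client API. It is not the test's own code (the test drives a mini-cluster directly); the class name, the reload flag and the hard-coded destination ServerName are illustrative, with the destination string copied from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MoveRegionExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(table)) {
          // Resolve the region currently holding row 'r1' (the locator lookup at the
          // top of this segment returned it with seqNum=2 on server 43811).
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("r1"), true);
          String encodedRegion = loc.getRegion().getEncodedName();
          // Destination region server, taken verbatim from the log for illustration.
          ServerName destination = ServerName.valueOf("16b413a53992,43519,1731342349897");
          // The master logs "move hri=..., running balancer" and runs a
          // TransitRegionStateProcedure (REOPEN/MOVE) much like pid=7 above.
          admin.move(Bytes.toBytes(encodedRegion), destination);
        }
      }
    }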
2024-11-11T16:26:09,690 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(2902): Flushing 9e5cca078c8f306f6c3dea9fad229919 2/2 column families, dataSize=31 B heapSize=616 B 2024-11-11T16:26:09,712 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp/cf1/bd0723cd43554ba48822a261822be856 is 35, key is r1/cf1:q/1731342369497/Put/seqid=0 2024-11-11T16:26:09,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741887_1065 (size=4783) 2024-11-11T16:26:09,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741887_1065 (size=4783) 2024-11-11T16:26:09,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741887_1065 (size=4783) 2024-11-11T16:26:09,724 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp/cf1/bd0723cd43554ba48822a261822be856 2024-11-11T16:26:09,734 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp/cf1/bd0723cd43554ba48822a261822be856 as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/bd0723cd43554ba48822a261822be856 2024-11-11T16:26:09,743 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/bd0723cd43554ba48822a261822be856, entries=1, sequenceid=5, filesize=4.7 K 2024-11-11T16:26:09,745 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 9e5cca078c8f306f6c3dea9fad229919 in 55ms, sequenceid=5, compaction requested=false 2024-11-11T16:26:09,745 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testReplayEditsAfterRegionMovedWithMultiCF' 2024-11-11T16:26:09,769 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/recovered.edits/8.seqid, 
newMaxSeqId=8, maxSeqId=1 2024-11-11T16:26:09,773 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:09,773 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegion(1676): Region close journal for 9e5cca078c8f306f6c3dea9fad229919: Waiting for close lock at 1731342369690Running coprocessor pre-close hooks at 1731342369690Disabling compacts and flushes for region at 1731342369690Disabling writes for close at 1731342369690Obtaining lock to block concurrent updates at 1731342369690Preparing flush snapshotting stores in 9e5cca078c8f306f6c3dea9fad229919 at 1731342369690Finished memstore snapshotting testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., syncing WAL and waiting on mvcc, flushsize=dataSize=31, getHeapSize=584, getOffHeapSize=0, getCellsCount=1 at 1731342369691 (+1 ms)Flushing stores of testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. at 1731342369691Flushing 9e5cca078c8f306f6c3dea9fad229919/cf1: creating writer at 1731342369692 (+1 ms)Flushing 9e5cca078c8f306f6c3dea9fad229919/cf1: appending metadata at 1731342369712 (+20 ms)Flushing 9e5cca078c8f306f6c3dea9fad229919/cf1: closing flushed file at 1731342369712Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f87c4b: reopening flushed file at 1731342369733 (+21 ms)Finished flush of dataSize ~31 B/31, heapSize ~344 B/344, currentSize=0 B/0 for 9e5cca078c8f306f6c3dea9fad229919 in 55ms, sequenceid=5, compaction requested=false at 1731342369745 (+12 ms)Writing region close event to WAL at 1731342369756 (+11 ms)Running coprocessor post-close hooks at 1731342369770 (+14 ms)Closed at 1731342369773 (+3 ms) 2024-11-11T16:26:09,774 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] regionserver.HRegionServer(3302): Adding 9e5cca078c8f306f6c3dea9fad229919 move to 16b413a53992,43519,1731342349897 record at close sequenceid=5 2024-11-11T16:26:09,781 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=8}] handler.UnassignRegionHandler(157): Closed 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:09,781 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=CLOSED 2024-11-11T16:26:09,790 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43811,1731342350126 because future has completed 2024-11-11T16:26:09,815 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-11T16:26:09,815 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; CloseRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43811,1731342350126 in 262 msec 2024-11-11T16:26:09,817 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE; state=CLOSED, 
location=16b413a53992,43519,1731342349897; forceNewPlan=false, retain=false 2024-11-11T16:26:09,971 INFO [16b413a53992:40215 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-11T16:26:09,971 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=OPENING, regionLocation=16b413a53992,43519,1731342349897 2024-11-11T16:26:09,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE because future has completed 2024-11-11T16:26:09,976 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43519,1731342349897}] 2024-11-11T16:26:10,134 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,134 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7752): Opening region: {ENCODED => 9e5cca078c8f306f6c3dea9fad229919, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:10,135 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,135 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:10,135 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7794): checking encryption for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,135 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(7797): checking classloading for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,137 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,138 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf1 2024-11-11T16:26:10,138 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:10,145 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/bd0723cd43554ba48822a261822be856 2024-11-11T16:26:10,145 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:10,146 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,146 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf2 2024-11-11T16:26:10,147 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:10,147 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:10,147 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1038): replaying wal for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,148 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,150 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(5546): Found 0 
recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,151 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1048): stopping wal replay for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,151 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1060): Cleaning up temporary data for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,152 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 2024-11-11T16:26:10,153 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1093): writing seq id for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,154 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1114): Opened 9e5cca078c8f306f6c3dea9fad229919; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74314737, jitterRate=0.10737587511539459}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-11T16:26:10,154 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,155 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegion(1006): Region open journal for 9e5cca078c8f306f6c3dea9fad229919: Running coprocessor pre-open hook at 1731342370135Writing region info on filesystem at 1731342370135Initializing all the Stores at 1731342370137 (+2 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342370137Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342370137Cleaning up temporary data from old regions at 1731342370151 (+14 ms)Running coprocessor post-open hooks at 1731342370154 (+3 ms)Region opened successfully at 1731342370155 (+1 ms) 2024-11-11T16:26:10,157 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., pid=9, masterSystemTime=1731342370129 2024-11-11T16:26:10,159 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] regionserver.HRegionServer(2266): Finished post open deploy task for 
testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,159 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=9}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,160 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=7 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=OPEN, openSeqNum=9, regionLocation=16b413a53992,43519,1731342349897 2024-11-11T16:26:10,163 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43519,1731342349897 because future has completed 2024-11-11T16:26:10,167 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-11T16:26:10,167 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43519,1731342349897 in 190 msec 2024-11-11T16:26:10,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE in 647 msec 2024-11-11T16:26:10,179 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T16:26:10,181 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38090, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T16:26:10,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43811 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 139 connection: 172.17.0.2:49836 deadline: 1731342430185, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=16b413a53992 port=43519 startCode=1731342349897. As of locationSeqNum=5. 2024-11-11T16:26:10,193 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=2 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=2, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=16b413a53992 port=43519 startCode=1731342349897. As of locationSeqNum=5. 2024-11-11T16:26:10,193 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=2 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=16b413a53992 port=43519 startCode=1731342349897. As of locationSeqNum=5. 
2024-11-11T16:26:10,193 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=2 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43519,1731342349897, seqNum=5 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=16b413a53992 port=43519 startCode=1731342349897. As of locationSeqNum=5. 2024-11-11T16:26:10,305 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T16:26:10,307 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38098, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T16:26:10,320 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 9e5cca078c8f306f6c3dea9fad229919 2/2 column families, dataSize=50 B heapSize=720 B 2024-11-11T16:26:10,345 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp/cf1/d59ae7580b0a48c2a0dd06a1905fef04 is 29, key is r1/cf1:/1731342370309/DeleteFamily/seqid=0 2024-11-11T16:26:10,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741888_1066 (size=4906) 2024-11-11T16:26:10,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741888_1066 (size=4906) 2024-11-11T16:26:10,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741888_1066 (size=4906) 2024-11-11T16:26:10,357 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), to=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp/cf1/d59ae7580b0a48c2a0dd06a1905fef04 2024-11-11T16:26:10,366 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d59ae7580b0a48c2a0dd06a1905fef04 2024-11-11T16:26:10,383 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp/cf2/c613fedd85e940c98b1cbe1788a96d16 is 29, key is r1/cf2:/1731342370309/DeleteFamily/seqid=0 2024-11-11T16:26:10,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741889_1067 (size=4906) 2024-11-11T16:26:10,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741889_1067 (size=4906) 2024-11-11T16:26:10,393 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=25 B at sequenceid=12 (bloomFilter=false), 
to=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp/cf2/c613fedd85e940c98b1cbe1788a96d16 2024-11-11T16:26:10,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741889_1067 (size=4906) 2024-11-11T16:26:10,401 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c613fedd85e940c98b1cbe1788a96d16 2024-11-11T16:26:10,402 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp/cf1/d59ae7580b0a48c2a0dd06a1905fef04 as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/d59ae7580b0a48c2a0dd06a1905fef04 2024-11-11T16:26:10,410 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d59ae7580b0a48c2a0dd06a1905fef04 2024-11-11T16:26:10,410 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/d59ae7580b0a48c2a0dd06a1905fef04, entries=1, sequenceid=12, filesize=4.8 K 2024-11-11T16:26:10,411 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp/cf2/c613fedd85e940c98b1cbe1788a96d16 as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf2/c613fedd85e940c98b1cbe1788a96d16 2024-11-11T16:26:10,418 INFO [Time-limited test {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c613fedd85e940c98b1cbe1788a96d16 2024-11-11T16:26:10,418 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf2/c613fedd85e940c98b1cbe1788a96d16, entries=1, sequenceid=12, filesize=4.8 K 2024-11-11T16:26:10,419 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~50 B/50, heapSize ~688 B/688, currentSize=0 B/0 for 9e5cca078c8f306f6c3dea9fad229919 in 99ms, sequenceid=12, compaction requested=false 2024-11-11T16:26:10,419 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 9e5cca078c8f306f6c3dea9fad229919: 2024-11-11T16:26:10,422 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-11T16:26:10,424 DEBUG [Time-limited test {}] regionserver.HStore(1541): 9e5cca078c8f306f6c3dea9fad229919/cf1 is initiating major compaction (all files) 2024-11-11T16:26:10,424 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput 
configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T16:26:10,425 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:26:10,425 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 9e5cca078c8f306f6c3dea9fad229919/cf1 in testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,426 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/bd0723cd43554ba48822a261822be856, hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/d59ae7580b0a48c2a0dd06a1905fef04] into tmpdir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp, totalSize=9.5 K 2024-11-11T16:26:10,427 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting bd0723cd43554ba48822a261822be856, keycount=1, bloomtype=NONE, size=4.7 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731342369497 2024-11-11T16:26:10,428 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting d59ae7580b0a48c2a0dd06a1905fef04, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-11-11T16:26:10,441 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 9e5cca078c8f306f6c3dea9fad229919#cf1#compaction#16 average throughput is 0.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T16:26:10,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741890_1068 (size=4626) 2024-11-11T16:26:10,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741890_1068 (size=4626) 2024-11-11T16:26:10,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741890_1068 (size=4626) 2024-11-11T16:26:10,459 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp/cf1/a17ba28e32794ff9832d967404e05609 as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/a17ba28e32794ff9832d967404e05609 2024-11-11T16:26:10,474 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 2 (all) file(s) in 9e5cca078c8f306f6c3dea9fad229919/cf1 of 9e5cca078c8f306f6c3dea9fad229919 into a17ba28e32794ff9832d967404e05609(size=4.5 K), total size for store is 4.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
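The sequence just logged is consistent with a whole-row delete of 'r1' followed by a flush and a major compaction: the Mutate call first hit the old server (43811), drew a RegionMovedException, and was retried transparently against 43519; the flush then wrote one DeleteFamily cell per column family at sequenceid=12, and cf1 was just rewritten into a single store file (the cf2 compaction and the move back to 43811 follow below). The sketch below is a hedged public-API equivalent, not the test's code: the test performs the flush and compaction directly on the HRegion from the "Time-limited test" thread, and the class and method names here are invented for illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteFlushCompactExample {
      static void deleteFlushAndCompact(Connection conn) throws Exception {
        TableName name = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
        try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
          // Whole-row delete: with no family or qualifier given it is recorded as one
          // DeleteFamily cell per column family (cf1 and cf2), matching the cells
          // flushed above at sequenceid=12. If the cached location is stale, the first
          // attempt fails with RegionMovedException and the client retries on the new
          // server without any application-level handling.
          table.delete(new Delete(Bytes.toBytes("r1")));
          // Persist the memstore and rewrite each family into a single store file;
          // this is the Admin-API counterpart of what the test does on the HRegion.
          // majorCompact is asynchronous and completes in the background.
          admin.flush(name);
          admin.majorCompact(name, Bytes.toBytes("cf1"));
          admin.majorCompact(name, Bytes.toBytes("cf2"));
        }
      }
    }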
2024-11-11T16:26:10,474 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 9e5cca078c8f306f6c3dea9fad229919: 2024-11-11T16:26:10,474 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-11T16:26:10,474 DEBUG [Time-limited test {}] regionserver.HStore(1541): 9e5cca078c8f306f6c3dea9fad229919/cf2 is initiating major compaction (all files) 2024-11-11T16:26:10,474 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T16:26:10,474 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:26:10,475 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 9e5cca078c8f306f6c3dea9fad229919/cf2 in testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,475 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf2/c613fedd85e940c98b1cbe1788a96d16] into tmpdir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp, totalSize=4.8 K 2024-11-11T16:26:10,475 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting c613fedd85e940c98b1cbe1788a96d16, keycount=1, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=9223372036854775807 2024-11-11T16:26:10,481 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 9e5cca078c8f306f6c3dea9fad229919#cf2#compaction#17 average throughput is NaN MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T16:26:10,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741891_1069 (size=4592) 2024-11-11T16:26:10,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741891_1069 (size=4592) 2024-11-11T16:26:10,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741891_1069 (size=4592) 2024-11-11T16:26:10,496 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/.tmp/cf2/ed56f31d05b14deaac9a2bf1e6ba24cd as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf2/ed56f31d05b14deaac9a2bf1e6ba24cd 2024-11-11T16:26:10,503 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 1 (all) file(s) in 9e5cca078c8f306f6c3dea9fad229919/cf2 of 9e5cca078c8f306f6c3dea9fad229919 into ed56f31d05b14deaac9a2bf1e6ba24cd(size=4.5 K), total size for store is 4.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T16:26:10,503 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 9e5cca078c8f306f6c3dea9fad229919: 2024-11-11T16:26:10,508 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.HMaster(2410): Client=jenkins//172.17.0.2 move hri=9e5cca078c8f306f6c3dea9fad229919, source=16b413a53992,43519,1731342349897, destination=16b413a53992,43811,1731342350126, warming up region on 16b413a53992,43811,1731342350126 2024-11-11T16:26:10,508 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.HMaster(2414): Client=jenkins//172.17.0.2 move hri=9e5cca078c8f306f6c3dea9fad229919, source=16b413a53992,43519,1731342349897, destination=16b413a53992,43811,1731342350126, running balancer 2024-11-11T16:26:10,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] procedure2.ProcedureExecutor(1139): Stored pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE 2024-11-11T16:26:10,510 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE 2024-11-11T16:26:10,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43811 {}] regionserver.RSRpcServices(2066): Warmup testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,511 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=CLOSING, regionLocation=16b413a53992,43519,1731342349897 2024-11-11T16:26:10,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43811 {}] regionserver.HRegion(7855): Warmup {ENCODED => 9e5cca078c8f306f6c3dea9fad229919, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:10,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43811 {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:10,512 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,513 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window 
factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf1 2024-11-11T16:26:10,513 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:10,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE because future has completed 2024-11-11T16:26:10,515 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-11T16:26:10,515 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43519,1731342349897}] 2024-11-11T16:26:10,522 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/a17ba28e32794ff9832d967404e05609 2024-11-11T16:26:10,527 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/bd0723cd43554ba48822a261822be856 2024-11-11T16:26:10,532 INFO [StoreFileOpener-9e5cca078c8f306f6c3dea9fad229919-cf1-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for d59ae7580b0a48c2a0dd06a1905fef04 2024-11-11T16:26:10,532 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/d59ae7580b0a48c2a0dd06a1905fef04 2024-11-11T16:26:10,533 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:10,533 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,534 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for 
tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf2 2024-11-11T16:26:10,534 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:10,541 INFO [StoreFileOpener-9e5cca078c8f306f6c3dea9fad229919-cf2-1 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c613fedd85e940c98b1cbe1788a96d16 2024-11-11T16:26:10,541 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf2/c613fedd85e940c98b1cbe1788a96d16 2024-11-11T16:26:10,545 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf2/ed56f31d05b14deaac9a2bf1e6ba24cd 2024-11-11T16:26:10,545 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:10,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43811 {}] regionserver.HRegion(1722): Closing 9e5cca078c8f306f6c3dea9fad229919, disabling compactions & flushes 2024-11-11T16:26:10,546 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43811 {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43811 {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43811 {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. after waiting 0 ms 2024-11-11T16:26:10,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43811 {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43811 {}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
2024-11-11T16:26:10,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43811 {}] regionserver.HRegion(1676): Region close journal for 9e5cca078c8f306f6c3dea9fad229919: Waiting for close lock at 1731342370546Disabling compacts and flushes for region at 1731342370546Disabling writes for close at 1731342370546Writing region close event to WAL at 1731342370547 (+1 ms)Closed at 1731342370547 2024-11-11T16:26:10,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] procedure.ProcedureSyncWait(219): waitFor pid=10 2024-11-11T16:26:10,669 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(122): Close 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,669 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-11T16:26:10,669 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1722): Closing 9e5cca078c8f306f6c3dea9fad229919, disabling compactions & flushes 2024-11-11T16:26:10,670 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,670 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,670 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. after waiting 0 ms 2024-11-11T16:26:10,670 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:10,670 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/bd0723cd43554ba48822a261822be856, hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/d59ae7580b0a48c2a0dd06a1905fef04] to archive 2024-11-11T16:26:10,673 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T16:26:10,677 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/bd0723cd43554ba48822a261822be856 to hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/bd0723cd43554ba48822a261822be856 2024-11-11T16:26:10,679 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/d59ae7580b0a48c2a0dd06a1905fef04 to hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/d59ae7580b0a48c2a0dd06a1905fef04 2024-11-11T16:26:10,693 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf2/c613fedd85e940c98b1cbe1788a96d16] to archive 2024-11-11T16:26:10,694 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T16:26:10,696 DEBUG [StoreCloser-testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf2/c613fedd85e940c98b1cbe1788a96d16 to hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf2/c613fedd85e940c98b1cbe1788a96d16 2024-11-11T16:26:10,702 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=8 2024-11-11T16:26:10,703 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
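The HFileArchiver records above show that store files replaced by the major compaction are not deleted in place: on close they are moved under the cluster's archive directory, and a recovered.edits/17.seqid marker is written so the region reopens at the right sequence id (the close journal follows below). Purely as a hedged inspection sketch, not part of the test, the snippet below lists what ended up in the cf1 archive directory using the Hadoop FileSystem API; the path is copied verbatim from the log, and the test-data directory is randomly generated per run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path archivedCf1 = new Path(
            "hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/"
            + "archive/data/default/testReplayEditsAfterRegionMovedWithMultiCF/"
            + "9e5cca078c8f306f6c3dea9fad229919/cf1");
        FileSystem fs = archivedCf1.getFileSystem(conf);
        for (FileStatus stat : fs.listStatus(archivedCf1)) {
          // Expected per the log: bd0723cd43554ba48822a261822be856 and
          // d59ae7580b0a48c2a0dd06a1905fef04, archived when the region closed.
          System.out.println(stat.getPath() + " (" + stat.getLen() + " bytes)");
        }
      }
    }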
2024-11-11T16:26:10,703 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegion(1676): Region close journal for 9e5cca078c8f306f6c3dea9fad229919: Waiting for close lock at 1731342370669Running coprocessor pre-close hooks at 1731342370669Disabling compacts and flushes for region at 1731342370669Disabling writes for close at 1731342370670 (+1 ms)Writing region close event to WAL at 1731342370697 (+27 ms)Running coprocessor post-close hooks at 1731342370703 (+6 ms)Closed at 1731342370703 2024-11-11T16:26:10,703 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] regionserver.HRegionServer(3302): Adding 9e5cca078c8f306f6c3dea9fad229919 move to 16b413a53992,43811,1731342350126 record at close sequenceid=12 2024-11-11T16:26:10,706 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION, pid=11}] handler.UnassignRegionHandler(157): Closed 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:10,707 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=CLOSED 2024-11-11T16:26:10,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=10, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43519,1731342349897 because future has completed 2024-11-11T16:26:10,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=10 2024-11-11T16:26:10,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=10, state=SUCCESS, hasLock=false; CloseRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43519,1731342349897 in 197 msec 2024-11-11T16:26:10,716 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE; state=CLOSED, location=16b413a53992,43811,1731342350126; forceNewPlan=false, retain=false 2024-11-11T16:26:10,867 INFO [16b413a53992:40215 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-11T16:26:10,867 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=OPENING, regionLocation=16b413a53992,43811,1731342350126 2024-11-11T16:26:10,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE because future has completed 2024-11-11T16:26:10,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43811,1731342350126}] 2024-11-11T16:26:11,028 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
2024-11-11T16:26:11,028 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 9e5cca078c8f306f6c3dea9fad229919, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:11,029 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,029 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:11,029 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,029 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,030 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,031 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf1 2024-11-11T16:26:11,032 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:11,038 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/a17ba28e32794ff9832d967404e05609 2024-11-11T16:26:11,038 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:11,039 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,040 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf2 2024-11-11T16:26:11,040 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:11,048 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf2/ed56f31d05b14deaac9a2bf1e6ba24cd 2024-11-11T16:26:11,048 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:11,048 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,049 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,050 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,051 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,051 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,052 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
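
The 64.0 M lower bound reported by FlushLargeStoresPolicy is simply the region memstore flush size divided by the number of column families. Assuming the default 128 MB hbase.hregion.memstore.flush.size (the log only shows the result) and the two families cf1/cf2 opened above, that gives 64 MB:

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // hbase.hregion.memstore.flush.size default (assumed)
        int columnFamilies = 2;                      // cf1 and cf2 from the log
        long lowerBound = memstoreFlushSize / columnFamilies;
        System.out.println(lowerBound / (1024.0 * 1024.0) + " M"); // prints 64.0 M, matching the log line
      }
    }
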
2024-11-11T16:26:11,053 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,054 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 9e5cca078c8f306f6c3dea9fad229919; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60654888, jitterRate=-0.09617173671722412}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-11T16:26:11,054 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,055 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 9e5cca078c8f306f6c3dea9fad229919: Running coprocessor pre-open hook at 1731342371029Writing region info on filesystem at 1731342371029Initializing all the Stores at 1731342371030 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342371030Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342371030Cleaning up temporary data from old regions at 1731342371051 (+21 ms)Running coprocessor post-open hooks at 1731342371054 (+3 ms)Region opened successfully at 1731342371055 (+1 ms) 2024-11-11T16:26:11,056 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., pid=12, masterSystemTime=1731342371024 2024-11-11T16:26:11,059 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:11,059 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
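
The "next sequenceid=18" chosen at open follows from the 17.seqid marker written during the close on the old server: the new host takes the highest N.seqid file under recovered.edits and starts at N+1, which is also the openSeqNum recorded in hbase:meta below. A small sketch of that relationship (the file name is from the log; the parsing helper is illustrative, not the WALSplitUtil code):

    public class NextSeqIdSketch {
      public static void main(String[] args) {
        // Marker written at close time: .../recovered.edits/17.seqid (newMaxSeqId=17)
        String marker = "17.seqid";
        long maxSeqId = Long.parseLong(marker.substring(0, marker.indexOf('.')));
        long nextSequenceId = maxSeqId + 1; // region opens with next sequenceid=18, openSeqNum=18
        System.out.println("next sequenceid=" + nextSequenceId);
      }
    }
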
2024-11-11T16:26:11,060 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=OPEN, openSeqNum=18, regionLocation=16b413a53992,43811,1731342350126 2024-11-11T16:26:11,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43811,1731342350126 because future has completed 2024-11-11T16:26:11,067 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-11T16:26:11,067 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,43811,1731342350126 in 192 msec 2024-11-11T16:26:11,069 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, REOPEN/MOVE in 559 msec 2024-11-11T16:26:11,112 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T16:26:11,114 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57606, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T16:26:11,116 ERROR [Time-limited test {}] regionserver.HRegionServer(2442): ***** ABORTING region server 16b413a53992,43811,1731342350126: testing ***** 2024-11-11T16:26:11,116 ERROR [Time-limited test {}] regionserver.HRegionServer(2447): RegionServer abort: loaded coprocessors are: [org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint] 2024-11-11T16:26:11,119 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for java.lang:type=Memory 2024-11-11T16:26:11,120 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=IPC 2024-11-11T16:26:11,126 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Replication 2024-11-11T16:26:11,129 DEBUG [Time-limited test {}] util.JSONBean(135): Listing beans for Hadoop:service=HBase,name=RegionServer,sub=Server 2024-11-11T16:26:11,143 INFO [Time-limited test {}] regionserver.HRegionServer(2451): Dump of metrics as JSON on abort: { "beans": [ { "name": "java.lang:type=Memory", "modelerType": "sun.management.MemoryImpl", "ObjectPendingFinalizationCount": 0, "HeapMemoryUsage": { "committed": 1048576000, "init": 1048576000, "max": 2306867200, "used": 357066200 }, "NonHeapMemoryUsage": { "committed": 172294144, "init": 7667712, "max": -1, "used": 169688088 }, "Verbose": false, "ObjectName": "java.lang:type=Memory" } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=IPC", "modelerType": "RegionServer,sub=IPC", "tag.Context": "regionserver", "tag.Hostname": "16b413a53992", "queueSize": 0, "numCallsInGeneralQueue": 0, "numCallsInReplicationQueue": 0, "numCallsInBulkLoadQueue": 0, "numCallsInPriorityQueue": 0, "numCallsInMetaPriorityQueue": 0, "numOpenConnections": 3, "numActiveHandler": 0, "numActiveGeneralHandler": 0, "numActivePriorityHandler": 0, "numActiveReplicationHandler": 0, "numGeneralCallsDropped": 0, "numLifoModeSwitches": 0, "numCallsInWriteQueue": 0, 
"numActiveBulkLoadHandler": 0, "numCallsInReadQueue": 0, "numCallsInScanQueue": 0, "numActiveWriteHandler": 0, "numActiveReadHandler": 0, "numActiveScanHandler": 0, "nettyDirectMemoryUsage": 67108864, "nettyTotalPendingOutboundBytes": 0, "nettyMaxPendingOutboundBytes": 0, "receivedBytes": 10271, "exceptions.RegionMovedException": 1, "authenticationSuccesses": 0, "authorizationFailures": 0, "exceptions.requestTooBig": 0, "UnwritableTime_num_ops": 0, "UnwritableTime_min": 0, "UnwritableTime_max": 0, "UnwritableTime_mean": 0, "UnwritableTime_25th_percentile": 0, "UnwritableTime_median": 0, "UnwritableTime_75th_percentile": 0, "UnwritableTime_90th_percentile": 0, "UnwritableTime_95th_percentile": 0, "UnwritableTime_98th_percentile": 0, "UnwritableTime_99th_percentile": 0, "UnwritableTime_99.9th_percentile": 0, "exceptions.OutOfOrderScannerNextException": 0, "exceptions.rpcThrottling": 0, "exceptions.otherExceptions": 0, "ProcessCallTime_num_ops": 39, "ProcessCallTime_min": 0, "ProcessCallTime_max": 39, "ProcessCallTime_mean": 4, "ProcessCallTime_25th_percentile": 1, "ProcessCallTime_median": 2, "ProcessCallTime_75th_percentile": 4, "ProcessCallTime_90th_percentile": 7, "ProcessCallTime_95th_percentile": 15, "ProcessCallTime_98th_percentile": 39, "ProcessCallTime_99th_percentile": 39, "ProcessCallTime_99.9th_percentile": 39, "ProcessCallTime_TimeRangeCount_0-1": 9, "ProcessCallTime_TimeRangeCount_1-3": 13, "ProcessCallTime_TimeRangeCount_3-10": 7, "ProcessCallTime_TimeRangeCount_10-30": 1, "ProcessCallTime_TimeRangeCount_30-100": 1, "exceptions.callQueueTooBig": 0, "QueueCallTime_num_ops": 39, "QueueCallTime_min": 0, "QueueCallTime_max": 1, "QueueCallTime_mean": 0, "QueueCallTime_25th_percentile": 0, "QueueCallTime_median": 0, "QueueCallTime_75th_percentile": 0, "QueueCallTime_90th_percentile": 0, "QueueCallTime_95th_percentile": 0, "QueueCallTime_98th_percentile": 1, "QueueCallTime_99th_percentile": 1, "QueueCallTime_99.9th_percentile": 1, "QueueCallTime_TimeRangeCount_0-1": 31, "authenticationFailures": 0, "exceptions.multiResponseTooLarge": 0, "exceptions.callDropped": 0, "TotalCallTime_num_ops": 39, "TotalCallTime_min": 0, "TotalCallTime_max": 39, "TotalCallTime_mean": 4, "TotalCallTime_25th_percentile": 1, "TotalCallTime_median": 2, "TotalCallTime_75th_percentile": 4, "TotalCallTime_90th_percentile": 7, "TotalCallTime_95th_percentile": 15, "TotalCallTime_98th_percentile": 39, "TotalCallTime_99th_percentile": 39, "TotalCallTime_99.9th_percentile": 39, "TotalCallTime_TimeRangeCount_0-1": 8, "TotalCallTime_TimeRangeCount_1-3": 14, "TotalCallTime_TimeRangeCount_3-10": 7, "TotalCallTime_TimeRangeCount_10-30": 1, "TotalCallTime_TimeRangeCount_30-100": 1, "exceptions.RegionTooBusyException": 0, "exceptions.FailedSanityCheckException": 0, "ResponseSize_num_ops": 39, "ResponseSize_min": 0, "ResponseSize_max": 1138, "ResponseSize_mean": 159, "ResponseSize_25th_percentile": 2, "ResponseSize_median": 2, "ResponseSize_75th_percentile": 74, "ResponseSize_90th_percentile": 453, "ResponseSize_95th_percentile": 795, "ResponseSize_98th_percentile": 1001, "ResponseSize_99th_percentile": 1069, "ResponseSize_99.9th_percentile": 1131, "ResponseSize_SizeRangeCount_0-10": 22, "ResponseSize_SizeRangeCount_10-100": 4, "ResponseSize_SizeRangeCount_100-1000": 5, "exceptions.UnknownScannerException": 0, "exceptions": 1, "maxOutboundBytesExceeded": 0, "authenticationFallbacks": 0, "exceptions.quotaExceeded": 0, "exceptions.callTimedOut": 0, "exceptions.NotServingRegionException": 0, "authorizationSuccesses": 0, 
"exceptions.ScannerResetException": 0, "RequestSize_num_ops": 39, "RequestSize_min": 31, "RequestSize_max": 390, "RequestSize_mean": 204, "RequestSize_25th_percentile": 119, "RequestSize_median": 179, "RequestSize_75th_percentile": 330, "RequestSize_90th_percentile": 330, "RequestSize_95th_percentile": 365, "RequestSize_98th_percentile": 380, "RequestSize_99th_percentile": 385, "RequestSize_99.9th_percentile": 389, "RequestSize_SizeRangeCount_0-10": 2, "RequestSize_SizeRangeCount_100-1000": 29, "sentBytes": 5772 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Replication", "modelerType": "RegionServer,sub=Replication", "tag.Context": "regionserver", "tag.Hostname": "16b413a53992", "source.shippedHFiles": 0, "Source.ageOfLastShippedOp_num_ops": 0, "Source.ageOfLastShippedOp_min": 0, "Source.ageOfLastShippedOp_max": 0, "Source.ageOfLastShippedOp_mean": 0, "Source.ageOfLastShippedOp_25th_percentile": 0, "Source.ageOfLastShippedOp_median": 0, "Source.ageOfLastShippedOp_75th_percentile": 0, "Source.ageOfLastShippedOp_90th_percentile": 0, "Source.ageOfLastShippedOp_95th_percentile": 0, "Source.ageOfLastShippedOp_98th_percentile": 0, "Source.ageOfLastShippedOp_99th_percentile": 0, "Source.ageOfLastShippedOp_99.9th_percentile": 0, "source.uncleanlyClosedLogs": 0, "source.closedLogsWithUnknownFileLength": 0, "source.walReaderEditsBufferUsage": 0, "source.repeatedLogFileBytes": 0, "source.sizeOfHFileRefsQueue": 0, "source.logReadInBytes": 0, "source.completedRecoverQueues": 0, "source.sizeOfLogQueue": 0, "source.restartedLogReading": 0, "source.failedRecoverQueues": 0, "source.ignoredUncleanlyClosedLogContentsInBytes": 0, "Sink.ageOfLastAppliedOp_num_ops": 0, "Sink.ageOfLastAppliedOp_min": 0, "Sink.ageOfLastAppliedOp_max": 0, "Sink.ageOfLastAppliedOp_mean": 0, "Sink.ageOfLastAppliedOp_25th_percentile": 0, "Sink.ageOfLastAppliedOp_median": 0, "Sink.ageOfLastAppliedOp_75th_percentile": 0, "Sink.ageOfLastAppliedOp_90th_percentile": 0, "Sink.ageOfLastAppliedOp_95th_percentile": 0, "Sink.ageOfLastAppliedOp_98th_percentile": 0, "Sink.ageOfLastAppliedOp_99th_percentile": 0, "Sink.ageOfLastAppliedOp_99.9th_percentile": 0, "source.logEditsRead": 0, "source.numInitializing": 0, "source.shippedOps": 0, "sink.appliedHFiles": 0, "source.logEditsFiltered": 0, "source.shippedBytes": 0, "sink.appliedOps": 0, "source.completedLogs": 0, "source.failedBatches": 0, "sink.failedBatches": 0, "source.shippedBatches": 0, "sink.appliedBatches": 0 } ], "beans": [ { "name": "Hadoop:service=HBase,name=RegionServer,sub=Server", "modelerType": "RegionServer,sub=Server", "tag.zookeeperQuorum": "127.0.0.1:59036", "tag.serverName": "16b413a53992,43519,1731342349897", "tag.clusterId": "b4307b4d-0d23-478f-a331-4653ccc7b610", "tag.Context": "regionserver", "tag.Hostname": "16b413a53992", "regionCount": 0, "storeCount": 0, "hlogFileCount": 3, "hlogFileSize": 0, "storeFileCount": 0, "maxStoreFileCount": 0, "memStoreSize": 0, "memStoreHeapSize": 0, "memStoreOffHeapSize": 0, "storeFileSize": 0, "storeFileSizeGrowthRate": 0.0, "maxStoreFileAge": 0, "minStoreFileAge": 0, "avgStoreFileAge": 0, "numReferenceFiles": 0, "regionServerStartTime": 1731342349897, "averageRegionSize": 0, "storeFileIndexSize": 0, "staticIndexSize": 0, "staticBloomSize": 0, "bloomFilterRequestsCount": 0, "bloomFilterNegativeResultsCount": 0, "bloomFilterEligibleRequestsCount": 0, "mutationsWithoutWALCount": 0, "mutationsWithoutWALSize": 0, "percentFilesLocal": 0.0, "percentFilesLocalSecondaryRegions": 0.0, "totalBytesRead": 320685, 
"localBytesRead": 320685, "shortCircuitBytesRead": 0, "zeroCopyBytesRead": 0, "splitQueueLength": 0, "compactionQueueLength": 0, "smallCompactionQueueLength": 0, "largeCompactionQueueLength": 0, "flushQueueLength": 0, "blockCacheFreeSize": 922070024, "blockCacheCount": 0, "blockCacheDataBlockCount": 0, "blockCacheSize": 676856, "blockCacheCountHitPercent": 25.0, "blockCacheExpressHitPercent": 0.0, "l1CacheSize": 676856, "l1CacheFreeSize": 922070024, "l1CacheCount": 0, "l1CacheEvictionCount": 0, "l1CacheHitCount": 1, "l1CacheMissCount": 3, "l1CacheHitRatio": 0.25, "l1CacheMissRatio": 0.75, "l2CacheSize": 0, "l2CacheFreeSize": 0, "l2CacheCount": 0, "l2CacheEvictionCount": 0, "l2CacheHitCount": 0, "l2CacheMissCount": 0, "l2CacheHitRatio": 0.0, "l2CacheMissRatio": 0.0, "mobFileCacheCount": 0, "mobFileCacheHitPercent": 0.0, "readRequestRatePerSecond": 0.0, "writeRequestRatePerSecond": 0.0, "ByteBuffAllocatorHeapAllocationBytes": 227435, "ByteBuffAllocatorPoolAllocationBytes": 0, "ByteBuffAllocatorHeapAllocationRatio": 1.0, "ByteBuffAllocatorTotalBufferCount": 186, "ByteBuffAllocatorUsedBufferCount": 0, "activeScanners": 0, "totalRequestCount": 3, "totalRowActionRequestCount": 0, "readRequestCount": 0, "cpRequestCount": 0, "filteredReadRequestCount": 0, "writeRequestCount": 0, "rpcGetRequestCount": 0, "rpcFullScanRequestCount": 1, "rpcScanRequestCount": 1, "rpcMultiRequestCount": 0, "rpcMutateRequestCount": 1, "checkMutateFailedCount": 0, "checkMutatePassedCount": 0, "blockCacheHitCount": 1, "blockCacheHitCountPrimary": 1, "blockCacheHitCachingCount": 0, "blockCacheMissCount": 3, "blockCacheMissCountPrimary": 3, "blockCacheMissCachingCount": 1, "blockCacheEvictionCount": 0, "blockCacheEvictionCountPrimary": 0, "blockCacheFailedInsertionCount": 0, "blockCacheDataMissCount": 3, "blockCacheLeafIndexMissCount": 0, "blockCacheBloomChunkMissCount": 0, "blockCacheMetaMissCount": 0, "blockCacheRootIndexMissCount": 0, "blockCacheIntermediateIndexMissCount": 0, "blockCacheFileInfoMissCount": 0, "blockCacheGeneralBloomMetaMissCount": 0, "blockCacheDeleteFamilyBloomMissCount": 0, "blockCacheTrailerMissCount": 0, "blockCacheDataHitCount": 1, "blockCacheLeafIndexHitCount": 0, "blockCacheBloomChunkHitCount": 0, "blockCacheMetaHitCount": 0, "blockCacheRootIndexHitCount": 0, "blockCacheIntermediateIndexHitCount": 0, "blockCacheFileInfoHitCount": 0, "blockCacheGeneralBloomMetaHitCount": 0, "blockCacheDeleteFamilyBloomHitCount": 0, "blockCacheTrailerHitCount": 0, "updatesBlockedTime": 0, "flushedCellsCount": 0, "compactedCellsCount": 0, "majorCompactedCellsCount": 0, "flushedCellsSize": 0, "compactedCellsSize": 0, "majorCompactedCellsSize": 0, "cellsCountCompactedFromMob": 0, "cellsCountCompactedToMob": 0, "cellsSizeCompactedFromMob": 0, "cellsSizeCompactedToMob": 0, "mobFlushCount": 0, "mobFlushedCellsCount": 0, "mobFlushedCellsSize": 0, "mobScanCellsCount": 0, "mobScanCellsSize": 0, "mobFileCacheAccessCount": 0, "mobFileCacheMissCount": 0, "mobFileCacheEvictedCount": 0, "hedgedReads": 0, "hedgedReadWins": 0, "hedgedReadOpsInCurThread": 0, "blockedRequestCount": 0, "CheckAndMutate_num_ops": 0, "CheckAndMutate_min": 0, "CheckAndMutate_max": 0, "CheckAndMutate_mean": 0, "CheckAndMutate_25th_percentile": 0, "CheckAndMutate_median": 0, "CheckAndMutate_75th_percentile": 0, "CheckAndMutate_90th_percentile": 0, "CheckAndMutate_95th_percentile": 0, "CheckAndMutate_98th_percentile": 0, "CheckAndMutate_99th_percentile": 0, "CheckAndMutate_99.9th_percentile": 0, "MajorCompactionTime_num_ops": 2, "MajorCompactionTime_min": 
28, "MajorCompactionTime_max": 47, "MajorCompactionTime_mean": 37, "MajorCompactionTime_25th_percentile": 28, "MajorCompactionTime_median": 29, "MajorCompactionTime_75th_percentile": 47, "MajorCompactionTime_90th_percentile": 47, "MajorCompactionTime_95th_percentile": 47, "MajorCompactionTime_98th_percentile": 47, "MajorCompactionTime_99th_percentile": 47, "MajorCompactionTime_99.9th_percentile": 47, "MajorCompactionTime_TimeRangeCount_10-30": 1, "MajorCompactionTime_TimeRangeCount_30-100": 1, "ScanTime_num_ops": 1, "ScanTime_min": 0, "ScanTime_max": 0, "ScanTime_mean": 0, "ScanTime_25th_percentile": 0, "ScanTime_median": 0, "ScanTime_75th_percentile": 0, "ScanTime_90th_percentile": 0, "ScanTime_95th_percentile": 0, "ScanTime_98th_percentile": 0, "ScanTime_99th_percentile": 0, "ScanTime_99.9th_percentile": 0, "ScanTime_TimeRangeCount_0-1": 1, "CheckAndMutateBlockBytesScanned_num_ops": 0, "CheckAndMutateBlockBytesScanned_min": 0, "CheckAndMutateBlockBytesScanned_max": 0, "CheckAndMutateBlockBytesScanned_mean": 0, "CheckAndMutateBlockBytesScanned_25th_percentile": 0, "CheckAndMutateBlockBytesScanned_median": 0, "CheckAndMutateBlockBytesScanned_75th_percentile": 0, "CheckAndMutateBlockBytesScanned_90th_percentile": 0, "CheckAndMutateBlockBytesScanned_95th_percentile": 0, "CheckAndMutateBlockBytesScanned_98th_percentile": 0, "CheckAndMutateBlockBytesScanned_99th_percentile": 0, "CheckAndMutateBlockBytesScanned_99.9th_percentile": 0, "Put_num_ops": 0, "Put_min": 0, "Put_max": 0, "Put_mean": 0, "Put_25th_percentile": 0, "Put_median": 0, "Put_75th_percentile": 0, "Put_90th_percentile": 0, "Put_95th_percentile": 0, "Put_98th_percentile": 0, "Put_99th_percentile": 0, "Put_99.9th_percentile": 0, "splitRequestCount": 0, "AppendBlockBytesScanned_num_ops": 0, "AppendBlockBytesScanned_min": 0, "AppendBlockBytesScanned_max": 0, "AppendBlockBytesScanned_mean": 0, "AppendBlockBytesScanned_25th_percentile": 0, "AppendBlockBytesScanned_median": 0, "AppendBlockBytesScanned_75th_percentile": 0, "AppendBlockBytesScanned_90th_percentile": 0, "AppendBlockBytesScanned_95th_percentile": 0, "AppendBlockBytesScanned_98th_percentile": 0, "AppendBlockBytesScanned_99th_percentile": 0, "AppendBlockBytesScanned_99.9th_percentile": 0, "PutBatch_num_ops": 0, "PutBatch_min": 0, "PutBatch_max": 0, "PutBatch_mean": 0, "PutBatch_25th_percentile": 0, "PutBatch_median": 0, "PutBatch_75th_percentile": 0, "PutBatch_90th_percentile": 0, "PutBatch_95th_percentile": 0, "PutBatch_98th_percentile": 0, "PutBatch_99th_percentile": 0, "PutBatch_99.9th_percentile": 0, "IncrementBlockBytesScanned_num_ops": 0, "IncrementBlockBytesScanned_min": 0, "IncrementBlockBytesScanned_max": 0, "IncrementBlockBytesScanned_mean": 0, "IncrementBlockBytesScanned_25th_percentile": 0, "IncrementBlockBytesScanned_median": 0, "IncrementBlockBytesScanned_75th_percentile": 0, "IncrementBlockBytesScanned_90th_percentile": 0, "IncrementBlockBytesScanned_95th_percentile": 0, "IncrementBlockBytesScanned_98th_percentile": 0, "IncrementBlockBytesScanned_99th_percentile": 0, "IncrementBlockBytesScanned_99.9th_percentile": 0, "SplitTime_num_ops": 0, "SplitTime_min": 0, "SplitTime_max": 0, "SplitTime_mean": 0, "SplitTime_25th_percentile": 0, "SplitTime_median": 0, "SplitTime_75th_percentile": 0, "SplitTime_90th_percentile": 0, "SplitTime_95th_percentile": 0, "SplitTime_98th_percentile": 0, "SplitTime_99th_percentile": 0, "SplitTime_99.9th_percentile": 0, "GetBlockBytesScanned_num_ops": 0, "GetBlockBytesScanned_min": 0, "GetBlockBytesScanned_max": 0, 
"GetBlockBytesScanned_mean": 0, "GetBlockBytesScanned_25th_percentile": 0, "GetBlockBytesScanned_median": 0, "GetBlockBytesScanned_75th_percentile": 0, "GetBlockBytesScanned_90th_percentile": 0, "GetBlockBytesScanned_95th_percentile": 0, "GetBlockBytesScanned_98th_percentile": 0, "GetBlockBytesScanned_99th_percentile": 0, "GetBlockBytesScanned_99.9th_percentile": 0, "majorCompactedInputBytes": 14595, "slowAppendCount": 0, "flushedOutputBytes": 9812, "Replay_num_ops": 0, "Replay_min": 0, "Replay_max": 0, "Replay_mean": 0, "Replay_25th_percentile": 0, "Replay_median": 0, "Replay_75th_percentile": 0, "Replay_90th_percentile": 0, "Replay_95th_percentile": 0, "Replay_98th_percentile": 0, "Replay_99th_percentile": 0, "Replay_99.9th_percentile": 0, "MajorCompactionInputSize_num_ops": 2, "MajorCompactionInputSize_min": 4906, "MajorCompactionInputSize_max": 9689, "MajorCompactionInputSize_mean": 7297, "MajorCompactionInputSize_25th_percentile": 6101, "MajorCompactionInputSize_median": 7297, "MajorCompactionInputSize_75th_percentile": 8493, "MajorCompactionInputSize_90th_percentile": 9210, "MajorCompactionInputSize_95th_percentile": 9449, "MajorCompactionInputSize_98th_percentile": 9593, "MajorCompactionInputSize_99th_percentile": 9641, "MajorCompactionInputSize_99.9th_percentile": 9684, "MajorCompactionInputSize_SizeRangeCount_100-1000": 2, "pauseInfoThresholdExceeded": 0, "CheckAndDelete_num_ops": 0, "CheckAndDelete_min": 0, "CheckAndDelete_max": 0, "CheckAndDelete_mean": 0, "CheckAndDelete_25th_percentile": 0, "CheckAndDelete_median": 0, "CheckAndDelete_75th_percentile": 0, "CheckAndDelete_90th_percentile": 0, "CheckAndDelete_95th_percentile": 0, "CheckAndDelete_98th_percentile": 0, "CheckAndDelete_99th_percentile": 0, "CheckAndDelete_99.9th_percentile": 0, "CompactionInputSize_num_ops": 2, "CompactionInputSize_min": 4906, "CompactionInputSize_max": 9689, "CompactionInputSize_mean": 7297, "CompactionInputSize_25th_percentile": 6101, "CompactionInputSize_median": 7297, "CompactionInputSize_75th_percentile": 8493, "CompactionInputSize_90th_percentile": 9210, "CompactionInputSize_95th_percentile": 9449, "CompactionInputSize_98th_percentile": 9593, "CompactionInputSize_99th_percentile": 9641, "CompactionInputSize_99.9th_percentile": 9684, "CompactionInputSize_SizeRangeCount_100-1000": 2, "flushedMemstoreBytes": 50, "majorCompactedOutputBytes": 9218, "slowPutCount": 0, "compactedInputBytes": 14595, "FlushOutputSize_num_ops": 1, "FlushOutputSize_min": 9812, "FlushOutputSize_max": 9812, "FlushOutputSize_mean": 9812, "FlushOutputSize_25th_percentile": 9812, "FlushOutputSize_median": 9812, "FlushOutputSize_75th_percentile": 9812, "FlushOutputSize_90th_percentile": 9812, "FlushOutputSize_95th_percentile": 9812, "FlushOutputSize_98th_percentile": 9812, "FlushOutputSize_99th_percentile": 9812, "FlushOutputSize_99.9th_percentile": 9812, "FlushOutputSize_SizeRangeCount_100-1000": 1, "PauseTimeWithGc_num_ops": 0, "PauseTimeWithGc_min": 0, "PauseTimeWithGc_max": 0, "PauseTimeWithGc_mean": 0, "PauseTimeWithGc_25th_percentile": 0, "PauseTimeWithGc_median": 0, "PauseTimeWithGc_75th_percentile": 0, "PauseTimeWithGc_90th_percentile": 0, "PauseTimeWithGc_95th_percentile": 0, "PauseTimeWithGc_98th_percentile": 0, "PauseTimeWithGc_99th_percentile": 0, "PauseTimeWithGc_99.9th_percentile": 0, "compactedOutputBytes": 9218, "pauseWarnThresholdExceeded": 0, "ScanBlockBytesScanned_num_ops": 1, "ScanBlockBytesScanned_min": 32, "ScanBlockBytesScanned_max": 32, "ScanBlockBytesScanned_mean": 32, 
"ScanBlockBytesScanned_25th_percentile": 32, "ScanBlockBytesScanned_median": 32, "ScanBlockBytesScanned_75th_percentile": 32, "ScanBlockBytesScanned_90th_percentile": 32, "ScanBlockBytesScanned_95th_percentile": 32, "ScanBlockBytesScanned_98th_percentile": 32, "ScanBlockBytesScanned_99th_percentile": 32, "ScanBlockBytesScanned_99.9th_percentile": 32, "ScanBlockBytesScanned_SizeRangeCount_10-100": 1, "Increment_num_ops": 0, "Increment_min": 0, "Increment_max": 0, "Increment_mean": 0, "Increment_25th_percentile": 0, "Increment_median": 0, "Increment_75th_percentile": 0, "Increment_90th_percentile": 0, "Increment_95th_percentile": 0, "Increment_98th_percentile": 0, "Increment_99th_percentile": 0, "Increment_99.9th_percentile": 0, "Delete_num_ops": 1, "Delete_min": 4, "Delete_max": 4, "Delete_mean": 4, "Delete_25th_percentile": 4, "Delete_median": 4, "Delete_75th_percentile": 4, "Delete_90th_percentile": 4, "Delete_95th_percentile": 4, "Delete_98th_percentile": 4, "Delete_99th_percentile": 4, "Delete_99.9th_percentile": 4, "Delete_TimeRangeCount_3-10": 1, "DeleteBatch_num_ops": 0, "DeleteBatch_min": 0, "DeleteBatch_max": 0, "DeleteBatch_mean": 0, "DeleteBatch_25th_percentile": 0, "DeleteBatch_median": 0, "DeleteBatch_75th_percentile": 0, "DeleteBatch_90th_percentile": 0, "DeleteBatch_95th_percentile": 0, "DeleteBatch_98th_percentile": 0, "DeleteBatch_99th_percentile": 0, "DeleteBatch_99.9th_percentile": 0, "blockBytesScannedCount": 32, "FlushMemstoreSize_num_ops": 1, "FlushMemstoreSize_min": 50, "FlushMemstoreSize_max": 50, "FlushMemstoreSize_mean": 50, "FlushMemstoreSize_25th_percentile": 50, "FlushMemstoreSize_median": 50, "FlushMemstoreSize_75th_percentile": 50, "FlushMemstoreSize_90th_percentile": 50, "FlushMemstoreSize_95th_percentile": 50, "FlushMemstoreSize_98th_percentile": 50, "FlushMemstoreSize_99th_percentile": 50, "FlushMemstoreSize_99.9th_percentile": 50, "FlushMemstoreSize_SizeRangeCount_10-100": 1, "CompactionInputFileCount_num_ops": 2, "CompactionInputFileCount_min": 1, "CompactionInputFileCount_max": 2, "CompactionInputFileCount_mean": 1, "CompactionInputFileCount_25th_percentile": 1, "CompactionInputFileCount_median": 2, "CompactionInputFileCount_75th_percentile": 2, "CompactionInputFileCount_90th_percentile": 2, "CompactionInputFileCount_95th_percentile": 2, "CompactionInputFileCount_98th_percentile": 2, "CompactionInputFileCount_99th_percentile": 2, "CompactionInputFileCount_99.9th_percentile": 2, "CompactionTime_num_ops": 2, "CompactionTime_min": 28, "CompactionTime_max": 47, "CompactionTime_mean": 37, "CompactionTime_25th_percentile": 28, "CompactionTime_median": 29, "CompactionTime_75th_percentile": 47, "CompactionTime_90th_percentile": 47, "CompactionTime_95th_percentile": 47, "CompactionTime_98th_percentile": 47, "CompactionTime_99th_percentile": 47, "CompactionTime_99.9th_percentile": 47, "CompactionTime_TimeRangeCount_10-30": 1, "CompactionTime_TimeRangeCount_30-100": 1, "Get_num_ops": 0, "Get_min": 0, "Get_max": 0, "Get_mean": 0, "Get_25th_percentile": 0, "Get_median": 0, "Get_75th_percentile": 0, "Get_90th_percentile": 0, "Get_95th_percentile": 0, "Get_98th_percentile": 0, "Get_99th_percentile": 0, "Get_99.9th_percentile": 0, "MajorCompactionInputFileCount_num_ops": 2, "MajorCompactionInputFileCount_min": 1, "MajorCompactionInputFileCount_max": 2, "MajorCompactionInputFileCount_mean": 1, "MajorCompactionInputFileCount_25th_percentile": 1, "MajorCompactionInputFileCount_median": 2, "MajorCompactionInputFileCount_75th_percentile": 2, 
"MajorCompactionInputFileCount_90th_percentile": 2, "MajorCompactionInputFileCount_95th_percentile": 2, "MajorCompactionInputFileCount_98th_percentile": 2, "MajorCompactionInputFileCount_99th_percentile": 2, "MajorCompactionInputFileCount_99.9th_percentile": 2, "scannerLeaseExpiredCount": 0, "CheckAndPut_num_ops": 0, "CheckAndPut_min": 0, "CheckAndPut_max": 0, "CheckAndPut_mean": 0, "CheckAndPut_25th_percentile": 0, "CheckAndPut_median": 0, "CheckAndPut_75th_percentile": 0, "CheckAndPut_90th_percentile": 0, "CheckAndPut_95th_percentile": 0, "CheckAndPut_98th_percentile": 0, "CheckAndPut_99th_percentile": 0, "CheckAndPut_99.9th_percentile": 0, "MajorCompactionOutputSize_num_ops": 2, "MajorCompactionOutputSize_min": 4592, "MajorCompactionOutputSize_max": 4626, "MajorCompactionOutputSize_mean": 4609, "MajorCompactionOutputSize_25th_percentile": 4600, "MajorCompactionOutputSize_median": 4609, "MajorCompactionOutputSize_75th_percentile": 4617, "MajorCompactionOutputSize_90th_percentile": 4622, "MajorCompactionOutputSize_95th_percentile": 4624, "MajorCompactionOutputSize_98th_percentile": 4625, "MajorCompactionOutputSize_99th_percentile": 4625, "MajorCompactionOutputSize_99.9th_percentile": 4625, "MajorCompactionOutputSize_SizeRangeCount_100-1000": 2, "CompactionOutputFileCount_num_ops": 2, "CompactionOutputFileCount_min": 1, "CompactionOutputFileCount_max": 1, "CompactionOutputFileCount_mean": 1, "CompactionOutputFileCount_25th_percentile": 1, "CompactionOutputFileCount_median": 1, "CompactionOutputFileCount_75th_percentile": 1, "CompactionOutputFileCount_90th_percentile": 1, "CompactionOutputFileCount_95th_percentile": 1, "CompactionOutputFileCount_98th_percentile": 1, "CompactionOutputFileCount_99th_percentile": 1, "CompactionOutputFileCount_99.9th_percentile": 1, "slowDeleteCount": 0, "FlushTime_num_ops": 1, "FlushTime_min": 99, "FlushTime_max": 99, "FlushTime_mean": 99, "FlushTime_25th_percentile": 99, "FlushTime_median": 99, "FlushTime_75th_percentile": 99, "FlushTime_90th_percentile": 99, "FlushTime_95th_percentile": 99, "FlushTime_98th_percentile": 99, "FlushTime_99th_percentile": 99, "FlushTime_99.9th_percentile": 99, "FlushTime_TimeRangeCount_30-100": 1, "splitSuccessCount": 0, "MajorCompactionOutputFileCount_num_ops": 2, "MajorCompactionOutputFileCount_min": 1, "MajorCompactionOutputFileCount_max": 1, "MajorCompactionOutputFileCount_mean": 1, "MajorCompactionOutputFileCount_25th_percentile": 1, "MajorCompactionOutputFileCount_median": 1, "MajorCompactionOutputFileCount_75th_percentile": 1, "MajorCompactionOutputFileCount_90th_percentile": 1, "MajorCompactionOutputFileCount_95th_percentile": 1, "MajorCompactionOutputFileCount_98th_percentile": 1, "MajorCompactionOutputFileCount_99th_percentile": 1, "MajorCompactionOutputFileCount_99.9th_percentile": 1, "slowGetCount": 0, "ScanSize_num_ops": 1, "ScanSize_min": 0, "ScanSize_max": 0, "ScanSize_mean": 0, "ScanSize_25th_percentile": 0, "ScanSize_median": 0, "ScanSize_75th_percentile": 0, "ScanSize_90th_percentile": 0, "ScanSize_95th_percentile": 0, "ScanSize_98th_percentile": 0, "ScanSize_99th_percentile": 0, "ScanSize_99.9th_percentile": 0, "ScanSize_SizeRangeCount_0-10": 1, "CompactionOutputSize_num_ops": 2, "CompactionOutputSize_min": 4592, "CompactionOutputSize_max": 4626, "CompactionOutputSize_mean": 4609, "CompactionOutputSize_25th_percentile": 4600, "CompactionOutputSize_median": 4609, "CompactionOutputSize_75th_percentile": 4617, "CompactionOutputSize_90th_percentile": 4622, "CompactionOutputSize_95th_percentile": 4624, 
"CompactionOutputSize_98th_percentile": 4625, "CompactionOutputSize_99th_percentile": 4625, "CompactionOutputSize_99.9th_percentile": 4625, "CompactionOutputSize_SizeRangeCount_100-1000": 2, "PauseTimeWithoutGc_num_ops": 0, "PauseTimeWithoutGc_min": 0, "PauseTimeWithoutGc_max": 0, "PauseTimeWithoutGc_mean": 0, "PauseTimeWithoutGc_25th_percentile": 0, "PauseTimeWithoutGc_median": 0, "PauseTimeWithoutGc_75th_percentile": 0, "PauseTimeWithoutGc_90th_percentile": 0, "PauseTimeWithoutGc_95th_percentile": 0, "PauseTimeWithoutGc_98th_percentile": 0, "PauseTimeWithoutGc_99th_percentile": 0, "PauseTimeWithoutGc_99.9th_percentile": 0, "slowIncrementCount": 0, "Append_num_ops": 0, "Append_min": 0, "Append_max": 0, "Append_mean": 0, "Append_25th_percentile": 0, "Append_median": 0, "Append_75th_percentile": 0, "Append_90th_percentile": 0, "Append_95th_percentile": 0, "Append_98th_percentile": 0, "Append_99th_percentile": 0, "Append_99.9th_percentile": 0, "Bulkload_count": 0, "Bulkload_mean_rate": 0.0, "Bulkload_1min_rate": 0.0, "Bulkload_5min_rate": 0.0, "Bulkload_15min_rate": 0.0, "Bulkload_num_ops": 0, "Bulkload_min": 0, "Bulkload_max": 0, "Bulkload_mean": 0, "Bulkload_25th_percentile": 0, "Bulkload_median": 0, "Bulkload_75th_percentile": 0, "Bulkload_90th_percentile": 0, "Bulkload_95th_percentile": 0, "Bulkload_98th_percentile": 0, "Bulkload_99th_percentile": 0, "Bulkload_99.9th_percentile": 0 } ] } 2024-11-11T16:26:11,147 WARN [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40215 {}] master.MasterRpcServices(700): 16b413a53992,43811,1731342350126 reported a fatal error: ***** ABORTING region server 16b413a53992,43811,1731342350126: testing ***** 2024-11-11T16:26:11,151 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b413a53992,43811,1731342350126' ***** 2024-11-11T16:26:11,151 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: testing 2024-11-11T16:26:11,152 INFO [RS:2;16b413a53992:43811 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T16:26:11,152 INFO [RS:2;16b413a53992:43811 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager abruptly. 2024-11-11T16:26:11,152 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T16:26:11,152 INFO [RS:2;16b413a53992:43811 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager abruptly. 2024-11-11T16:26:11,152 INFO [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(3091): Received CLOSE for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,153 INFO [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(956): aborting server 16b413a53992,43811,1731342350126 2024-11-11T16:26:11,153 INFO [RS:2;16b413a53992:43811 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:26:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43519 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Get size: 140 connection: 172.17.0.2:38098 deadline: 1731342431153, exception=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=16b413a53992 port=43811 startCode=1731342350126. As of locationSeqNum=12. 
2024-11-11T16:26:11,153 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9e5cca078c8f306f6c3dea9fad229919, disabling compactions & flushes 2024-11-11T16:26:11,153 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:11,153 INFO [RS:2;16b413a53992:43811 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;16b413a53992:43811. 2024-11-11T16:26:11,154 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:11,154 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. after waiting 0 ms 2024-11-11T16:26:11,154 DEBUG [RS:2;16b413a53992:43811 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:26:11,154 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:11,154 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43519,1731342349897, seqNum=5 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43519,1731342349897, seqNum=5, error=org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=16b413a53992 port=43811 startCode=1731342350126. As of locationSeqNum=12. 
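
The RegionMovedException above carries the new host, port, startCode, and locationSeqNum, which the async locator uses to replace its cached entry (seqNum=5 on 43519 becomes seqNum=12 on 43811 in the next line). A hedged sketch of the equivalent explicit refresh with the blocking client, forcing a fresh hbase:meta lookup instead of trusting the cache (illustrative only; the async client in the log patches its cache directly from the exception, and the row key "r1" here is an arbitrary example):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RefreshLocationSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
          // reload=true bypasses the cached location and re-reads hbase:meta,
          // picking up the post-move assignment (16b413a53992,43811,... in the log).
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("r1"), true);
          System.out.println(loc.getServerName() + " seqNum=" + loc.getSeqNum());
        }
      }
    }
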
2024-11-11T16:26:11,154 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43519,1731342349897, seqNum=5 is org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=16b413a53992 port=43811 startCode=1731342350126. As of locationSeqNum=12. 2024-11-11T16:26:11,154 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(84): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43519,1731342349897, seqNum=5 with the new location region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=12 constructed by org.apache.hadoop.hbase.exceptions.RegionMovedException: Region moved to: hostname=16b413a53992 port=43811 startCode=1731342350126. As of locationSeqNum=12. 2024-11-11T16:26:11,154 DEBUG [RS:2;16b413a53992:43811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:26:11,155 INFO [RS:2;16b413a53992:43811 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T16:26:11,155 INFO [RS:2;16b413a53992:43811 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T16:26:11,155 INFO [RS:2;16b413a53992:43811 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T16:26:11,155 INFO [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T16:26:11,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 16b413a53992,43811,1731342350126 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T16:26:11,157 INFO [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T16:26:11,157 DEBUG [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 9e5cca078c8f306f6c3dea9fad229919=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.} 2024-11-11T16:26:11,157 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T16:26:11,157 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T16:26:11,157 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 16b413a53992,43811,1731342350126 aborting 2024-11-11T16:26:11,157 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T16:26:11,157 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T16:26:11,157 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 16b413a53992,43811,1731342350126 aborting 2024-11-11T16:26:11,158 DEBUG [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:11,158 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T16:26:11,158 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 from cache 2024-11-11T16:26:11,158 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:11,159 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9e5cca078c8f306f6c3dea9fad229919: Waiting for close lock at 1731342371153Running coprocessor pre-close hooks at 1731342371153Disabling compacts and flushes for region at 1731342371153Disabling writes for close at 1731342371154 (+1 ms)Writing region close event to WAL at 1731342371158 (+4 ms)Running coprocessor post-close hooks at 1731342371158Closed at 1731342371158 2024-11-11T16:26:11,159 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
2024-11-11T16:26:11,159 ERROR [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1960): Memstore data size is 5811 in region hbase:meta,,1.1588230740 2024-11-11T16:26:11,159 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T16:26:11,159 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T16:26:11,159 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731342371157Running coprocessor pre-close hooks at 1731342371157Disabling compacts and flushes for region at 1731342371157Disabling writes for close at 1731342371157Writing region close event to WAL at 1731342371159 (+2 ms)Running coprocessor post-close hooks at 1731342371159Closed at 1731342371159 2024-11-11T16:26:11,159 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T16:26:11,196 INFO [regionserver/16b413a53992:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:26:11,265 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T16:26:11,267 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1] 2024-11-11T16:26:11,268 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 16b413a53992,43811,1731342350126 aborting at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processRequest(ServerRpcConnection.java:564) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.ServerRpcConnection.processOneRpc(ServerRpcConnection.java:364) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyServerRpcConnection.process(NettyServerRpcConnection.java:89) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:56) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder.channelRead0(NettyRpcServerRequestDecoder.java:31) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:99) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:11,269 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1, error=org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 16b413a53992,43811,1731342350126 aborting 2024-11-11T16:26:11,269 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 is org.apache.hadoop.hbase.regionserver.RegionServerAbortedException: Server 16b413a53992,43811,1731342350126 aborting 2024-11-11T16:26:11,269 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 from cache 2024-11-11T16:26:11,358 INFO [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(976): stopping server 16b413a53992,43811,1731342350126; all regions closed. 
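Note: the AsyncRegionLocatorHelper lines above show the client reacting to RegionServerAbortedException by dropping the cached meta location and re-fetching it from the registry on the next attempt. Below is a minimal sketch of that cache-invalidation pattern; all names (LocationCache, registryLookup) are invented for illustration and are not HBase client code.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

public class LocationCache {
    private final Map<String, String> cache = new ConcurrentHashMap<>(); // region -> host:port
    private final Supplier<String> registryLookup; // "Start fetching meta region location from registry"

    public LocationCache(Supplier<String> registryLookup) {
        this.registryLookup = registryLookup;
    }

    // Serve the cached location, or fall back to the registry on a miss.
    public String locate(String region) {
        return cache.computeIfAbsent(region, r -> registryLookup.get());
    }

    // Called when an RPC to the cached server fails (aborting / connection refused):
    // only evict if the cache still points at the server that failed.
    public void handleError(String region, String failedLocation) {
        cache.remove(region, failedLocation); // "Try removing region=... from cache"
    }
}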
2024-11-11T16:26:11,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741836_1012 (size=3561) 2024-11-11T16:26:11,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741836_1012 (size=3561) 2024-11-11T16:26:11,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741833_1009 (size=1404) 2024-11-11T16:26:11,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741833_1009 (size=1404) 2024-11-11T16:26:11,369 DEBUG [RS:2;16b413a53992:43811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:26:11,369 INFO [RS:2;16b413a53992:43811 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:26:11,369 INFO [RS:2;16b413a53992:43811 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:26:11,369 INFO [RS:2;16b413a53992:43811 {}] hbase.ChoreService(370): Chore service for: regionserver/16b413a53992:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T16:26:11,370 INFO [RS:2;16b413a53992:43811 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:26:11,370 INFO [regionserver/16b413a53992:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T16:26:11,370 INFO [RS:2;16b413a53992:43811 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43811 2024-11-11T16:26:11,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b413a53992,43811,1731342350126 2024-11-11T16:26:11,380 INFO [RS:2;16b413a53992:43811 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T16:26:11,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T16:26:11,382 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b413a53992,43811,1731342350126] 2024-11-11T16:26:11,384 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b413a53992,43811,1731342350126 already deleted, retry=false 2024-11-11T16:26:11,384 INFO [RegionServerTracker-0 {}] master.ServerManager(695): Processing expiration of 16b413a53992,43811,1731342350126 on 16b413a53992,40215,1731342348830 2024-11-11T16:26:11,391 DEBUG [RegionServerTracker-0 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=false; ServerCrashProcedure 16b413a53992,43811,1731342350126, splitWal=true, meta=true 2024-11-11T16:26:11,393 INFO [RegionServerTracker-0 {}] assignment.AssignmentManager(1991): Scheduled ServerCrashProcedure pid=13 for 16b413a53992,43811,1731342350126 (carryingMeta=true) 16b413a53992,43811,1731342350126/CRASHED/regionCount=2/lock=java.util.concurrent.locks.ReentrantReadWriteLock@87e4e5b[Write locks = 1, Read locks = 0], oldState=ONLINE. 
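Note: the sequence above (ephemeral znode under /hbase/rs deleted, RegionServerTracker processing the expiration, ServerCrashProcedure scheduled) is the usual ZooKeeper-based liveness check. A rough sketch using the plain Apache ZooKeeper client API follows; the znode path handling and the onServerCrash callback are illustrative, not the master's actual tracker code, and watches set this way fire only once.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsCrashWatcher implements Watcher {
    private final ZooKeeper zk;

    public RsCrashWatcher(String quorum) throws Exception {
        this.zk = new ZooKeeper(quorum, 30_000, this);
    }

    // Set a one-shot watch on a server's ephemeral node, e.g. /hbase/rs/<host>,<port>,<startcode>.
    public void watch(String rsZnode) throws Exception {
        zk.exists(rsZnode, this);
    }

    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeDeleted) {
            // Session expired or the server exited: begin crash handling
            // (logged above as scheduling a ServerCrashProcedure).
            onServerCrash(event.getPath());
        }
    }

    private void onServerCrash(String rsZnode) {
        System.out.println("expired: " + rsZnode);
    }
}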
2024-11-11T16:26:11,394 INFO [PEWorker-2 {}] procedure.ServerCrashProcedure(169): Start pid=13, state=RUNNABLE:SERVER_CRASH_START, hasLock=true; ServerCrashProcedure 16b413a53992,43811,1731342350126, splitWal=true, meta=true 2024-11-11T16:26:11,396 INFO [PEWorker-2 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_META_LOGS, hasLock=true; ServerCrashProcedure 16b413a53992,43811,1731342350126, splitWal=true, meta=true, isMeta: true 2024-11-11T16:26:11,398 DEBUG [PEWorker-2 {}] master.MasterWalManager(329): Renamed region directory: hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting 2024-11-11T16:26:11,399 INFO [PEWorker-2 {}] master.SplitWALManager(105): 16b413a53992,43811,1731342350126 WAL count=1, meta=true 2024-11-11T16:26:11,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure 16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta}] 2024-11-11T16:26:11,410 DEBUG [PEWorker-4 {}] master.SplitWALManager(158): Acquired split WAL worker=16b413a53992,42465,1731342350046 2024-11-11T16:26:11,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure 16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta, worker=16b413a53992,42465,1731342350046}] 2024-11-11T16:26:11,475 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T16:26:11,476 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1] 2024-11-11T16:26:11,478 WARN [RPCClient-NioEventLoopGroup-6-3 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 16b413a53992:43811 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 16b413a53992/172.17.0.2:43811 Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel.doFinishConnect(NioSocketChannel.java:336) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.finishConnect(AbstractNioChannel.java:339) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:776) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:11,479 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1, error=java.net.ConnectException: Call to address=16b413a53992:43811 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 16b413a53992/172.17.0.2:43811 2024-11-11T16:26:11,479 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 is java.net.ConnectException: Connection refused 2024-11-11T16:26:11,479 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 from cache 2024-11-11T16:26:11,479 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.FailedServers(52): Added failed server with address 16b413a53992:43811 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: Connection refused: 16b413a53992/172.17.0.2:43811 2024-11-11T16:26:11,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:26:11,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43811-0x1002fa9b94b0003, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:26:11,484 INFO [RS:2;16b413a53992:43811 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T16:26:11,484 INFO [RS:2;16b413a53992:43811 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b413a53992,43811,1731342350126; zookeeper connection closed. 
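Note: the ipc.FailedServers and AbstractRpcClient messages in this stretch show the client briefly blacklisting an address after a refused connection, so later calls fail fast ("Not trying to connect to ... this server is in the failed servers list") instead of re-dialling. A small sketch of that idea, assuming a simple time-window policy; the window length and class shape are illustrative.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class FailedServers {
    private final Map<String, Long> failedUntil = new ConcurrentHashMap<>();
    private final long retryWindowMs;

    public FailedServers(long retryWindowMs) { this.retryWindowMs = retryWindowMs; }

    // "Added failed server with address ... to list caused by ..."
    public void addToFailedServers(String address) {
        failedUntil.put(address, System.currentTimeMillis() + retryWindowMs);
    }

    // True while the failure is still fresh; expired entries are dropped lazily.
    public boolean isFailedServer(String address) {
        Long until = failedUntil.get(address);
        if (until == null) return false;
        if (System.currentTimeMillis() > until) {
            failedUntil.remove(address);
            return false;
        }
        return true;
    }
}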
2024-11-11T16:26:11,484 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1fcfaa29 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1fcfaa29 2024-11-11T16:26:11,579 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T16:26:11,581 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42031, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T16:26:11,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42465 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=15 2024-11-11T16:26:11,619 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta, size=3.5 K (3561bytes) 2024-11-11T16:26:11,619 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta 2024-11-11T16:26:11,620 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta after 0ms 2024-11-11T16:26:11,623 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:11,623 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(310): Open hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta took 5ms 2024-11-11T16:26:11,638 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(352): Last flushed sequenceid for 1588230740: last_flushed_sequence_id: 18446744073709551615 store_sequence_id { family_name: "info" sequence_id: 5 } store_sequence_id { family_name: "ns" sequence_id: 3 } store_sequence_id { family_name: "rep_barrier" sequence_id: 18446744073709551615 } store_sequence_id { family_name: "table" sequence_id: 6 } 2024-11-11T16:26:11,639 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(406): Finishing writing output for 
hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta so closing down 2024-11-11T16:26:11,639 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:11,641 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta.temp 2024-11-11T16:26:11,643 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta.temp 2024-11-11T16:26:11,644 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:11,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741892_1070 (size=3346) 2024-11-11T16:26:11,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741892_1070 (size=3346) 2024-11-11T16:26:11,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741892_1070 (size=3346) 2024-11-11T16:26:11,656 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta.temp (wrote 15 edits, skipped 0 edits in 0 ms) 2024-11-11T16:26:11,658 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta.temp to hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-11-11T16:26:11,658 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(425): Processed 16 edits across 1 Regions in 30 ms; skipped=1; WAL=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta, size=3.5 K, length=3561, corrupted=false, cancelled=false 2024-11-11T16:26:11,658 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta, journal: Splitting 
hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta, size=3.5 K (3561bytes) at 1731342371619Finishing writing output for hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta so closing down at 1731342371639 (+20 ms)Creating recovered edits writer path=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta.temp at 1731342371643 (+4 ms)3 split writer threads finished at 1731342371644 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta.temp (wrote 15 edits, skipped 0 edits in 0 ms) at 1731342371657 (+13 ms)Rename recovered edits hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000004-16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta.temp to hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 at 1731342371658 (+1 ms)Processed 16 edits across 1 Regions in 30 ms; skipped=1; WAL=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta, size=3.5 K, length=3561, corrupted=false, cancelled=false at 1731342371658 2024-11-11T16:26:11,659 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta 2024-11-11T16:26:11,661 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-11T16:26:11,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.HMaster(4169): Remote procedure done, pid=15 2024-11-11T16:26:11,670 INFO [PEWorker-5 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta to hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/oldWALs 2024-11-11T16:26:11,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=15, resume processing ppid=14 2024-11-11T16:26:11,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, ppid=14, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure 16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta, worker=16b413a53992,42465,1731342350046 in 258 msec 2024-11-11T16:26:11,675 DEBUG [PEWorker-5 {}] master.SplitWALManager(172): Release split WAL worker=16b413a53992,42465,1731342350046 2024-11-11T16:26:11,680 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-11T16:26:11,680 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure 16b413a53992%2C43811%2C1731342350126.meta.1731342352730.meta, worker=16b413a53992,42465,1731342350046 in 274 msec 2024-11-11T16:26:11,686 INFO [PEWorker-2 {}] master.SplitLogManager(171): hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting dir is empty, no logs to split. 2024-11-11T16:26:11,687 INFO [PEWorker-2 {}] master.SplitWALManager(105): 16b413a53992,43811,1731342350126 WAL count=0, meta=true 2024-11-11T16:26:11,687 DEBUG [PEWorker-2 {}] procedure.ServerCrashProcedure(329): Check if 16b413a53992,43811,1731342350126 WAL splitting is done? wals=0, meta=true 2024-11-11T16:26:11,694 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T16:26:11,696 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T16:26:11,697 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-11-11T16:26:11,785 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T16:26:11,787 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1] 2024-11-11T16:26:11,787 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.AbstractRpcClient(357): Not trying to connect to 16b413a53992:43811 this server is in the failed servers list 2024-11-11T16:26:11,788 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=16b413a53992:43811 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 16b413a53992:43811 2024-11-11T16:26:11,788 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 16b413a53992:43811 2024-11-11T16:26:11,788 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 from cache 2024-11-11T16:26:11,848 DEBUG [16b413a53992:40215 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=2, 
allServersCount=2 2024-11-11T16:26:11,848 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(204): Hosts are {16b413a53992=0} racks are {/default-rack=0} 2024-11-11T16:26:11,849 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T16:26:11,849 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T16:26:11,849 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T16:26:11,849 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T16:26:11,849 INFO [16b413a53992:40215 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T16:26:11,849 INFO [16b413a53992:40215 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T16:26:11,849 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T16:26:11,850 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=16b413a53992,43519,1731342349897 2024-11-11T16:26:11,854 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b413a53992,43519,1731342349897, state=OPENING 2024-11-11T16:26:11,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:26:11,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:26:11,860 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:26:11,860 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:26:11,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:26:11,860 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:26:11,861 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=16, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T16:26:11,862 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=16b413a53992,43519,1731342349897}] 2024-11-11T16:26:12,024 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T16:26:12,024 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.WALFactory(196): Instantiating WALProvider of type class 
org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:12,024 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-11T16:26:12,026 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C43519%2C1731342349897.meta, suffix=.meta, logDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43519,1731342349897, archiveDir=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/oldWALs, maxLogs=32 2024-11-11T16:26:12,041 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43519,1731342349897/16b413a53992%2C43519%2C1731342349897.meta.1731342372027.meta, exclude list is [], retry=0 2024-11-11T16:26:12,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:12,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:12,044 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:12,053 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43519,1731342349897/16b413a53992%2C43519%2C1731342349897.meta.1731342372027.meta 2024-11-11T16:26:12,054 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:26:12,054 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:12,055 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T16:26:12,055 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T16:26:12,055 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-11T16:26:12,055 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T16:26:12,055 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:12,055 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T16:26:12,056 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T16:26:12,057 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T16:26:12,058 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T16:26:12,058 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:12,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:26:12,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T16:26:12,060 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
1588230740 columnFamilyName ns 2024-11-11T16:26:12,060 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:12,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:26:12,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T16:26:12,062 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T16:26:12,062 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:12,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:26:12,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T16:26:12,064 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T16:26:12,064 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:12,065 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:26:12,065 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T16:26:12,066 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740 2024-11-11T16:26:12,067 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740 2024-11-11T16:26:12,068 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-11-11T16:26:12,070 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000018: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:12,072 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5793): Applied 40, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=18, path=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-11-11T16:26:12,073 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.67 KB heapSize=9.66 KB 2024-11-11T16:26:12,101 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/info/425df576435944a088d5fcd031d19212 is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919./info:regioninfo/1731342371059/Put/seqid=0 2024-11-11T16:26:12,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741894_1072 (size=11177) 2024-11-11T16:26:12,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741894_1072 (size=11177) 2024-11-11T16:26:12,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741894_1072 (size=11177) 2024-11-11T16:26:12,116 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.46 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/info/425df576435944a088d5fcd031d19212 2024-11-11T16:26:12,140 DEBUG 
[RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/ns/65ab2067d23647769b85b4da074f2bda is 43, key is default/ns:d/1731342352936/Put/seqid=0 2024-11-11T16:26:12,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741895_1073 (size=5153) 2024-11-11T16:26:12,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741895_1073 (size=5153) 2024-11-11T16:26:12,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741895_1073 (size=5153) 2024-11-11T16:26:12,149 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/ns/65ab2067d23647769b85b4da074f2bda 2024-11-11T16:26:12,174 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/table/e9e54e731f86426da0cc2cfb8a640238 is 78, key is testReplayEditsAfterRegionMovedWithMultiCF/table:state/1731342369169/Put/seqid=0 2024-11-11T16:26:12,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741896_1074 (size=5431) 2024-11-11T16:26:12,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741896_1074 (size=5431) 2024-11-11T16:26:12,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741896_1074 (size=5431) 2024-11-11T16:26:12,186 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=148 B at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/table/e9e54e731f86426da0cc2cfb8a640238 2024-11-11T16:26:12,193 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/info/425df576435944a088d5fcd031d19212 as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/info/425df576435944a088d5fcd031d19212 2024-11-11T16:26:12,201 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/info/425df576435944a088d5fcd031d19212, entries=36, sequenceid=18, filesize=10.9 K 2024-11-11T16:26:12,202 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/ns/65ab2067d23647769b85b4da074f2bda as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/ns/65ab2067d23647769b85b4da074f2bda 2024-11-11T16:26:12,208 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/ns/65ab2067d23647769b85b4da074f2bda, entries=2, sequenceid=18, filesize=5.0 K 2024-11-11T16:26:12,210 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/table/e9e54e731f86426da0cc2cfb8a640238 as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/table/e9e54e731f86426da0cc2cfb8a640238 2024-11-11T16:26:12,215 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/table/e9e54e731f86426da0cc2cfb8a640238, entries=2, sequenceid=18, filesize=5.3 K 2024-11-11T16:26:12,216 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~5.67 KB/5811, heapSize ~9.37 KB/9592, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=18, compaction requested=false; wal=null 2024-11-11T16:26:12,216 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/0000000000000000018 2024-11-11T16:26:12,218 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T16:26:12,218 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T16:26:12,219 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
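Note: the replay step above reports "Applied 40, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=18" and then flushes all four column families before deleting the recovered.edits file. A rough sketch of that replay rule (skip any edit already covered by a store's last flushed sequence id, re-apply the rest, then flush) using stand-in types rather than HBase's:

import java.util.List;
import java.util.Map;

public class RecoveredEditsReplay {
    record Edit(String family, long sequenceId, byte[] cell) {}

    // Returns the highest sequence id applied, mirroring maxSequenceIdInLog in the log.
    public static long replay(List<Edit> edits,
                              Map<String, Long> lastFlushedSeqIdPerFamily,
                              Map<String, List<byte[]>> memstore) {
        long maxApplied = -1;
        for (Edit e : edits) {
            long flushed = lastFlushedSeqIdPerFamily.getOrDefault(e.family(), -1L);
            if (e.sequenceId() <= flushed) {
                continue; // already durable in an HFile -> counted as "skipped"
            }
            memstore.computeIfAbsent(e.family(), f -> new java.util.ArrayList<>()).add(e.cell());
            maxApplied = Math.max(maxApplied, e.sequenceId());
        }
        // The caller then flushes the memstore and removes the recovered.edits file,
        // as in "Flushing 1588230740 4/4 column families" followed by
        // "Deleted recovered.edits file=...".
        return maxApplied;
    }
}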
2024-11-11T16:26:12,220 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T16:26:12,223 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/18.seqid, newMaxSeqId=18, maxSeqId=1 2024-11-11T16:26:12,224 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=19; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61795191, jitterRate=-0.07917989790439606}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T16:26:12,224 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T16:26:12,225 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731342372056Writing region info on filesystem at 1731342372056Initializing all the Stores at 1731342372057 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342372057Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342372057Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342372057Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342372057Obtaining lock to block concurrent updates at 1731342372073 (+16 ms)Preparing flush snapshotting stores in 1588230740 at 1731342372073Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=5811, getHeapSize=9832, getOffHeapSize=0, getCellsCount=40 at 1731342372073Flushing stores of hbase:meta,,1.1588230740 at 1731342372073Flushing 1588230740/info: creating writer at 1731342372073Flushing 1588230740/info: appending metadata at 1731342372101 (+28 ms)Flushing 1588230740/info: closing flushed file at 1731342372101Flushing 1588230740/ns: creating writer at 1731342372124 (+23 
ms)Flushing 1588230740/ns: appending metadata at 1731342372140 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1731342372140Flushing 1588230740/table: creating writer at 1731342372157 (+17 ms)Flushing 1588230740/table: appending metadata at 1731342372173 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731342372173Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c644d0b: reopening flushed file at 1731342372193 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@233d81af: reopening flushed file at 1731342372201 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58c41483: reopening flushed file at 1731342372209 (+8 ms)Finished flush of dataSize ~5.67 KB/5811, heapSize ~9.37 KB/9592, currentSize=0 B/0 for 1588230740 in 144ms, sequenceid=18, compaction requested=false; wal=null at 1731342372216 (+7 ms)Cleaning up temporary data from old regions at 1731342372218 (+2 ms)Running coprocessor post-open hooks at 1731342372224 (+6 ms)Region opened successfully at 1731342372225 (+1 ms) 2024-11-11T16:26:12,229 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=17, masterSystemTime=1731342372018 2024-11-11T16:26:12,231 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T16:26:12,231 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=17}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T16:26:12,232 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=16 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=19, regionLocation=16b413a53992,43519,1731342349897 2024-11-11T16:26:12,233 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b413a53992,43519,1731342349897, state=OPEN 2024-11-11T16:26:12,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:26:12,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:26:12,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:26:12,235 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:26:12,235 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:26:12,235 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=17, ppid=16, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=16b413a53992,43519,1731342349897 2024-11-11T16:26:12,236 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:26:12,240 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=16 2024-11-11T16:26:12,240 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=16, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=16b413a53992,43519,1731342349897 in 374 msec 2024-11-11T16:26:12,243 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=13 2024-11-11T16:26:12,244 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 546 msec 2024-11-11T16:26:12,244 INFO [PEWorker-4 {}] procedure.ServerCrashProcedure(207): 16b413a53992,43811,1731342350126 had 2 regions 2024-11-11T16:26:12,245 INFO [PEWorker-4 {}] procedure.ServerCrashProcedure(339): Splitting WALs pid=13, state=RUNNABLE:SERVER_CRASH_SPLIT_LOGS, hasLock=true; ServerCrashProcedure 16b413a53992,43811,1731342350126, splitWal=true, meta=true, isMeta: false 2024-11-11T16:26:12,248 INFO [PEWorker-4 {}] master.SplitWALManager(105): 16b413a53992,43811,1731342350126 WAL count=1, meta=false 2024-11-11T16:26:12,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=18, ppid=13, state=RUNNABLE:ACQUIRE_SPLIT_WAL_WORKER, hasLock=false; SplitWALProcedure 16b413a53992%2C43811%2C1731342350126.1731342352124}] 2024-11-11T16:26:12,251 DEBUG [PEWorker-3 {}] master.SplitWALManager(158): Acquired split WAL worker=16b413a53992,43519,1731342349897 2024-11-11T16:26:12,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE, hasLock=false; SplitWALRemoteProcedure 16b413a53992%2C43811%2C1731342350126.1731342352124, worker=16b413a53992,43519,1731342349897}] 2024-11-11T16:26:12,295 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T16:26:12,296 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b413a53992,43519,1731342349897, seqNum=-1] 2024-11-11T16:26:12,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43519 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SplitWALCallable, pid=19 2024-11-11T16:26:12,416 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.AbstractRpcClient(357): Not trying to connect to 16b413a53992:43811 this server is in the failed servers list 2024-11-11T16:26:12,416 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=12 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=12, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=16b413a53992:43811 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 16b413a53992:43811 2024-11-11T16:26:12,416 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(72): The actual 
exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=12 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 16b413a53992:43811 2024-11-11T16:26:12,416 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=12 from cache 2024-11-11T16:26:12,434 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124, size=1.4 K (1404bytes) 2024-11-11T16:26:12,434 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124 2024-11-11T16:26:12,435 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124 after 1ms 2024-11-11T16:26:12,441 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:12,442 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(310): Open hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124 took 8ms 2024-11-11T16:26:12,457 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(352): Last flushed sequenceid for 9e5cca078c8f306f6c3dea9fad229919: last_flushed_sequence_id: 12 store_sequence_id { family_name: "cf1" sequence_id: 12 } store_sequence_id { family_name: "cf2" sequence_id: 12 } 2024-11-11T16:26:12,458 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124 so closing down 2024-11-11T16:26:12,458 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:12,458 INFO [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:12,458 INFO 
[RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(425): Processed 6 edits across 0 Regions in 6 ms; skipped=6; WAL=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124, size=1.4 K, length=1404, corrupted=false, cancelled=false 2024-11-11T16:26:12,458 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124, journal: Splitting hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124, size=1.4 K (1404bytes) at 1731342372434Finishing writing output for hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124 so closing down at 1731342372458 (+24 ms)3 split writer threads finished at 1731342372458Processed 6 edits across 0 Regions in 6 ms; skipped=6; WAL=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124, size=1.4 K, length=1404, corrupted=false, cancelled=false at 1731342372458 2024-11-11T16:26:12,458 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] regionserver.SplitLogWorker(218): Done splitting WAL hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124 2024-11-11T16:26:12,458 DEBUG [RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0-0 {event_type=RS_LOG_REPLAY, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-11T16:26:12,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40215 {}] master.HMaster(4169): Remote procedure done, pid=19 2024-11-11T16:26:12,478 INFO [PEWorker-5 {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting/16b413a53992%2C43811%2C1731342350126.1731342352124 to hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/oldWALs 2024-11-11T16:26:12,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-11T16:26:12,483 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; SplitWALRemoteProcedure 16b413a53992%2C43811%2C1731342350126.1731342352124, worker=16b413a53992,43519,1731342349897 in 226 msec 2024-11-11T16:26:12,485 DEBUG [PEWorker-2 {}] master.SplitWALManager(172): Release split WAL worker=16b413a53992,43519,1731342349897 2024-11-11T16:26:12,489 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=18, resume processing ppid=13 2024-11-11T16:26:12,489 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, ppid=13, state=SUCCESS, hasLock=false; SplitWALProcedure 16b413a53992%2C43811%2C1731342350126.1731342352124, 
worker=16b413a53992,43519,1731342349897 in 237 msec 2024-11-11T16:26:12,493 INFO [PEWorker-2 {}] master.SplitLogManager(171): hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/WALs/16b413a53992,43811,1731342350126-splitting dir is empty, no logs to split. 2024-11-11T16:26:12,493 INFO [PEWorker-2 {}] master.SplitWALManager(105): 16b413a53992,43811,1731342350126 WAL count=0, meta=false 2024-11-11T16:26:12,493 DEBUG [PEWorker-2 {}] procedure.ServerCrashProcedure(329): Check if 16b413a53992,43811,1731342350126 WAL splitting is done? wals=0, meta=false 2024-11-11T16:26:12,497 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, ASSIGN}] 2024-11-11T16:26:12,498 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, ASSIGN 2024-11-11T16:26:12,501 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, ASSIGN; state=OPEN, location=null; forceNewPlan=true, retain=false 2024-11-11T16:26:12,633 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=18] 2024-11-11T16:26:12,633 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(357): Not trying to connect to 16b413a53992:43811 this server is in the failed servers list 2024-11-11T16:26:12,634 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=16b413a53992:43811 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 16b413a53992:43811 2024-11-11T16:26:12,634 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 16b413a53992:43811 2024-11-11T16:26:12,634 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=18 from cache 2024-11-11T16:26:12,652 DEBUG 
[16b413a53992:40215 {}] balancer.BalancerClusterState(204): Hosts are {16b413a53992=0} racks are {/default-rack=0} 2024-11-11T16:26:12,652 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T16:26:12,652 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T16:26:12,652 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T16:26:12,652 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T16:26:12,652 INFO [16b413a53992:40215 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T16:26:12,652 INFO [16b413a53992:40215 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T16:26:12,652 DEBUG [16b413a53992:40215 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T16:26:12,653 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=OPENING, regionLocation=16b413a53992,42465,1731342350046 2024-11-11T16:26:12,654 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.NettyRpcConnection$2(409): Exception encountered while connecting to the server 16b413a53992:43811 org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 16b413a53992/172.17.0.2:43811 Caused by: java.net.ConnectException: finishConnect(..) failed: Connection refused at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.newConnectException0(Errors.java:166) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Errors.handleConnectErrno(Errors.java:131) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.unix.Socket.finishConnect(Socket.java:359) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.doFinishConnect(AbstractEpollChannel.java:715) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:692) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
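The entries above show the client side of the region server crash: the connection to 16b413a53992:43811 is refused, the address is added to the failed-servers list, and AsyncRegionLocatorHelper evicts the stale location from the cache. From application code this is normally invisible; a plain synchronous read simply retries once the region has been reassigned. The following is a minimal sketch of such a read, assuming a standard HBase client on the classpath; the table name, row key, and column family are the ones that appear in the log, while the standalone class and the choice to read the whole cf1 family are illustrative only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAfterRegionMove {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Table, row key and family taken from the log above.
    TableName table = TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table t = conn.getTable(table)) {
      Get get = new Get(Bytes.toBytes("r1"));
      get.addFamily(Bytes.toBytes("cf1"));
      // If the cached location points at a crashed server, the client evicts it
      // (as the AsyncRegionLocatorHelper entries show) and retries against the
      // new location once the region has been reassigned.
      Result result = t.get(get);
      System.out.println("row exists: " + !result.isEmpty());
    }
  }
}
```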
2024-11-11T16:26:12,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(64): Try updating region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 , the old value is region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1, error=java.net.ConnectException: Call to address=16b413a53992:43811 failed on connection exception: org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 16b413a53992/172.17.0.2:43811 2024-11-11T16:26:12,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 is java.net.ConnectException: finishConnect(..) failed: Connection refused 2024-11-11T16:26:12,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncRegionLocatorHelper(88): Try removing region=hbase:meta,,1.1588230740, hostname=16b413a53992,43811,1731342350126, seqNum=-1 from cache 2024-11-11T16:26:12,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.FailedServers(52): Added failed server with address 16b413a53992:43811 to list caused by org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AnnotatedConnectException: finishConnect(..) failed: Connection refused: 16b413a53992/172.17.0.2:43811 2024-11-11T16:26:12,766 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T16:26:12,766 DEBUG [Async-Client-Retry-Timer-pool-0 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b413a53992,43519,1731342349897, seqNum=-1] 2024-11-11T16:26:12,766 DEBUG [Async-Client-Retry-Timer-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T16:26:12,769 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40397, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T16:26:12,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=13, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, ASSIGN because future has completed 2024-11-11T16:26:12,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,42465,1731342350046}] 2024-11-11T16:26:12,946 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(132): Open testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
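At this point the meta location has been refetched from the registry and the region is being reopened on 16b413a53992,42465, but the location cached earlier (seqNum=18 on the dead server) stays stale until it is evicted. A caller that does not want to rely on that eviction can request a fresh lookup explicitly; the sketch below does so with RegionLocator and reload=true, again assuming a standard HBase client and reusing the table and row key from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RefreshRegionLocation {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))) {
      // reload = true bypasses the cached entry (which may still point at the
      // crashed server) and goes back to hbase:meta for the current location.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("r1"), true);
      System.out.println("region now served by " + loc.getServerName());
    }
  }
}
```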
2024-11-11T16:26:12,946 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7752): Opening region: {ENCODED => 9e5cca078c8f306f6c3dea9fad229919, NAME => 'testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:12,947 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterRegionMovedWithMultiCF 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:12,947 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(898): Instantiated testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:12,947 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7794): checking encryption for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:12,948 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(7797): checking classloading for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:12,950 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf1 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:12,951 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=18] 2024-11-11T16:26:12,952 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf1 2024-11-11T16:26:12,952 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:12,952 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(357): Not trying to connect to 16b413a53992:43811 this server is in the failed servers list 2024-11-11T16:26:12,952 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(64): Try updating 
region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=18 , the old value is region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=18, error=org.apache.hadoop.hbase.ipc.FailedServerException: Call to address=16b413a53992:43811 failed on local exception: org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 16b413a53992:43811 2024-11-11T16:26:12,952 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=18 is org.apache.hadoop.hbase.ipc.FailedServerException: This server is in the failed servers list: 16b413a53992:43811 2024-11-11T16:26:12,954 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,43811,1731342350126, seqNum=18 from cache 2024-11-11T16:26:12,969 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf1/a17ba28e32794ff9832d967404e05609 2024-11-11T16:26:12,969 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf1, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:12,970 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf2 of region 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:12,971 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9e5cca078c8f306f6c3dea9fad229919 columnFamilyName cf2 2024-11-11T16:26:12,971 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:13,014 DEBUG [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/cf2/ed56f31d05b14deaac9a2bf1e6ba24cd 2024-11-11T16:26:13,014 INFO [StoreOpener-9e5cca078c8f306f6c3dea9fad229919-1 {}] regionserver.HStore(327): Store=9e5cca078c8f306f6c3dea9fad229919/cf2, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:13,014 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1038): replaying wal for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:13,016 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:13,022 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:13,023 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1048): stopping wal replay for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:13,023 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1060): Cleaning up temporary data for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:13,024 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterRegionMovedWithMultiCF descriptor;using region.getMemStoreFlushHeapSize/# of families (64.0 M)) instead. 
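The last entry above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so FlushLargeStoresPolicy falls back to the region memstore flush size divided by the number of families (64.0 M here). For reference, that property can be supplied per table in the descriptor; the sketch below is a hypothetical example with an arbitrary 16 MB value and an assumed open Connection, not something the test itself does.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SetPerFamilyFlushLowerBound {
  // 'conn' is assumed to be an already-open Connection; 16 MB is an arbitrary example value.
  static void createTable(Connection conn) throws Exception {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
        // Property name comes straight from the log line above; when present in the
        // descriptor, FlushLargeStoresPolicy uses it instead of flushSize / #families.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16 * 1024 * 1024))
        .build();
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(td);
    }
  }
}
```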
2024-11-11T16:26:13,029 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1093): writing seq id for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:13,031 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1114): Opened 9e5cca078c8f306f6c3dea9fad229919; next sequenceid=18; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66684898, jitterRate=-0.006317585706710815}}}, FlushLargeStoresPolicy{flushSizeLowerBound=67108864} 2024-11-11T16:26:13,031 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:13,032 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegion(1006): Region open journal for 9e5cca078c8f306f6c3dea9fad229919: Running coprocessor pre-open hook at 1731342372948Writing region info on filesystem at 1731342372948Initializing all the Stores at 1731342372949 (+1 ms)Instantiating store for column family {NAME => 'cf1', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342372949Instantiating store for column family {NAME => 'cf2', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342372950 (+1 ms)Cleaning up temporary data from old regions at 1731342373023 (+73 ms)Running coprocessor post-open hooks at 1731342373031 (+8 ms)Region opened successfully at 1731342373032 (+1 ms) 2024-11-11T16:26:13,035 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2236): Post open deploy tasks for testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., pid=21, masterSystemTime=1731342372932 2024-11-11T16:26:13,045 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] regionserver.HRegionServer(2266): Finished post open deploy task for testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:13,045 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=21}] handler.AssignRegionHandler(153): Opened testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
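The open journal above prints the schema of the two families being reopened: cf1 and cf2 with a single version, no bloom filter, no compression, and 64 KB blocks. As a point of reference, the sketch below builds equivalent descriptors with the public ColumnFamilyDescriptorBuilder API; it simply restates what the journal already shows and is not taken from the test code.

```java
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class TestTableSchema {
  // Mirrors the cf1/cf2 attributes printed in the region open journal above.
  static ColumnFamilyDescriptor family(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setMaxVersions(1)                  // VERSIONS => '1'
        .setBloomFilterType(BloomType.NONE) // BLOOMFILTER => 'NONE'
        .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)'
        .setInMemory(false)                 // IN_MEMORY => 'false'
        .build();
  }

  public static void main(String[] args) {
    System.out.println(family("cf1"));
    System.out.println(family("cf2"));
  }
}
```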
2024-11-11T16:26:13,049 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=9e5cca078c8f306f6c3dea9fad229919, regionState=OPEN, openSeqNum=18, regionLocation=16b413a53992,42465,1731342350046 2024-11-11T16:26:13,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=20, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,42465,1731342350046 because future has completed 2024-11-11T16:26:13,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=20 2024-11-11T16:26:13,071 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=20, state=SUCCESS, hasLock=false; OpenRegionProcedure 9e5cca078c8f306f6c3dea9fad229919, server=16b413a53992,42465,1731342350046 in 294 msec 2024-11-11T16:26:13,077 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=13 2024-11-11T16:26:13,077 INFO [PEWorker-4 {}] procedure.ServerCrashProcedure(291): removed crashed server 16b413a53992,43811,1731342350126 after splitting done 2024-11-11T16:26:13,077 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=13, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testReplayEditsAfterRegionMovedWithMultiCF, region=9e5cca078c8f306f6c3dea9fad229919, ASSIGN in 574 msec 2024-11-11T16:26:13,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; ServerCrashProcedure 16b413a53992,43811,1731342350126, splitWal=true, meta=true in 1.6920 sec 2024-11-11T16:26:13,471 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testReplayEditsAfterRegionMovedWithMultiCF', row='r1', locateType=CURRENT is [region=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919., hostname=16b413a53992,42465,1731342350046, seqNum=18] 2024-11-11T16:26:13,471 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T16:26:13,474 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34340, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T16:26:13,493 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterRegionMovedWithMultiCF Thread=400 (was 399) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741893_1071, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741893_1071, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1813202362_22 at /127.0.0.1:55044 [Waiting for operation #33] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1499031871_22 at /127.0.0.1:32996 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741893_1071] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/16b413a53992:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741893_1071, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1813202362_22 at /127.0.0.1:54054 [Waiting for operation #27] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/16b413a53992:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-0-hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553-prefix:16b413a53992,43519,1731342349897.meta java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/16b413a53992:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-890883998_22 at /127.0.0.1:32898 [Waiting for operation #30] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1499031871_22 at /127.0.0.1:55152 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741893_1071] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1499031871_22 at /127.0.0.1:54156 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741893_1071] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Abort regionserver monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1026 (was 993) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=691 (was 716), ProcessCount=11 (was 11), AvailableMemoryMB=2297 (was 2442) 2024-11-11T16:26:13,496 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1026 is superior to 1024 2024-11-11T16:26:13,514 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=400, OpenFileDescriptor=1026, MaxFileDescriptor=1048576, SystemLoadAverage=691, ProcessCount=11, AvailableMemoryMB=2296 2024-11-11T16:26:13,514 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1026 is superior to 1024 2024-11-11T16:26:13,536 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:13,539 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:13,540 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:26:13,543 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-93064937, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/hregion-93064937, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:13,559 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-93064937/hregion-93064937.1731342373543, exclude list is [], retry=0 2024-11-11T16:26:13,563 DEBUG [AsyncFSWAL-20-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:13,564 DEBUG [AsyncFSWAL-20-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:13,566 DEBUG [AsyncFSWAL-20-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:13,573 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-93064937/hregion-93064937.1731342373543 2024-11-11T16:26:13,573 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:26:13,573 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => d0f5b659a114aa26bddb48d8eb93699f, NAME => 'testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39605/hbase 2024-11-11T16:26:13,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741898_1076 (size=67) 2024-11-11T16:26:13,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741898_1076 (size=67) 2024-11-11T16:26:13,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741898_1076 (size=67) 2024-11-11T16:26:13,587 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:13,590 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,592 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0f5b659a114aa26bddb48d8eb93699f columnFamilyName a 2024-11-11T16:26:13,592 DEBUG [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:13,592 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(327): Store=d0f5b659a114aa26bddb48d8eb93699f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:13,593 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,594 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0f5b659a114aa26bddb48d8eb93699f columnFamilyName b 2024-11-11T16:26:13,594 DEBUG [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:13,595 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(327): Store=d0f5b659a114aa26bddb48d8eb93699f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:13,595 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,596 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0f5b659a114aa26bddb48d8eb93699f columnFamilyName c 2024-11-11T16:26:13,596 DEBUG [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:13,597 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(327): Store=d0f5b659a114aa26bddb48d8eb93699f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:13,597 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,598 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,598 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,599 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,599 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,600 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:13,601 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,603 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:26:13,604 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d0f5b659a114aa26bddb48d8eb93699f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60459963, jitterRate=-0.09907634556293488}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:13,605 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d0f5b659a114aa26bddb48d8eb93699f: Writing region info on filesystem at 1731342373587Initializing all the Stores at 1731342373589 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342373589Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342373589Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342373589Cleaning up temporary data from old regions at 1731342373599 (+10 ms)Region opened successfully at 1731342373605 (+6 ms) 2024-11-11T16:26:13,605 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d0f5b659a114aa26bddb48d8eb93699f, disabling compactions & flushes 2024-11-11T16:26:13,605 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f. 2024-11-11T16:26:13,605 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f. 2024-11-11T16:26:13,605 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f. 
after waiting 0 ms 2024-11-11T16:26:13,605 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f. 2024-11-11T16:26:13,606 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f. 2024-11-11T16:26:13,606 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d0f5b659a114aa26bddb48d8eb93699f: Waiting for close lock at 1731342373605Disabling compacts and flushes for region at 1731342373605Disabling writes for close at 1731342373605Writing region close event to WAL at 1731342373606 (+1 ms)Closed at 1731342373606 2024-11-11T16:26:13,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741897_1075 (size=95) 2024-11-11T16:26:13,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741897_1075 (size=95) 2024-11-11T16:26:13,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741897_1075 (size=95) 2024-11-11T16:26:13,637 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:13,638 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-93064937:(num 1731342373543) 2024-11-11T16:26:13,638 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:13,640 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:13,657 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641, exclude list is [], retry=0 2024-11-11T16:26:13,661 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:13,661 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:13,662 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:13,671 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641 2024-11-11T16:26:13,672 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:40387:40387)] 2024-11-11T16:26:13,672 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => d0f5b659a114aa26bddb48d8eb93699f, NAME => 'testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:13,672 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:13,672 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,672 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,697 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,701 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0f5b659a114aa26bddb48d8eb93699f columnFamilyName a 2024-11-11T16:26:13,701 DEBUG [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:13,705 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(327): Store=d0f5b659a114aa26bddb48d8eb93699f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:13,705 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,706 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0f5b659a114aa26bddb48d8eb93699f columnFamilyName b 2024-11-11T16:26:13,706 DEBUG [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:13,707 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(327): Store=d0f5b659a114aa26bddb48d8eb93699f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:13,707 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,708 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0f5b659a114aa26bddb48d8eb93699f columnFamilyName c 2024-11-11T16:26:13,708 DEBUG [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:13,709 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(327): Store=d0f5b659a114aa26bddb48d8eb93699f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:13,709 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,712 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,714 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,720 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,720 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,721 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
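[editor's note] The entries above show the region being reopened with three column families ('a', 'b', 'c', each VERSIONS => '1') and FlushLargeStoresPolicy falling back to region.getMemStoreFlushHeapSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. As a rough, hypothetical sketch (not taken from the test source), a descriptor along these lines could be built with the public HBase client API; the table and family names mirror the log, while the explicit lower-bound value and the helper class itself are assumptions.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class ReplayTableDescriptorSketch {
  // Hypothetical helper: builds a descriptor resembling the one dumped in the log
  // (families 'a', 'b', 'c', one version each). Note the log shows the per-family
  // flush lower bound is NOT set on the real table, so a derived default is used there.
  static TableDescriptor build() {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("testReplayEditsWrittenViaHRegion"));
    for (String family : new String[] { "a", "b", "c" }) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .build());
    }
    // Assumption: setting this key would give FlushLargeStoresPolicy an explicit
    // lower bound instead of memstore-flush-size / number-of-families.
    builder.setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
        String.valueOf(16L * 1024 * 1024));
    return builder.build();
  }

  public static void main(String[] args) {
    System.out.println(build());
  }
}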
2024-11-11T16:26:13,729 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:13,736 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d0f5b659a114aa26bddb48d8eb93699f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75159630, jitterRate=0.11996576189994812}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:13,736 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d0f5b659a114aa26bddb48d8eb93699f: Writing region info on filesystem at 1731342373672Initializing all the Stores at 1731342373689 (+17 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342373689Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342373692 (+3 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342373692Cleaning up temporary data from old regions at 1731342373720 (+28 ms)Region opened successfully at 1731342373736 (+16 ms) 2024-11-11T16:26:13,930 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d0f5b659a114aa26bddb48d8eb93699f 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-11-11T16:26:13,995 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/a/9a952c1a892a4fbc81d78767ccfc60af is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1731342373737/Put/seqid=0 2024-11-11T16:26:14,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741900_1078 (size=5958) 2024-11-11T16:26:14,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741900_1078 (size=5958) 2024-11-11T16:26:14,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741900_1078 (size=5958) 2024-11-11T16:26:14,483 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/a/9a952c1a892a4fbc81d78767ccfc60af 2024-11-11T16:26:14,526 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/b/bf0d6b0f901d4d73b87a92c5ee36efec is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1731342373801/Put/seqid=0 2024-11-11T16:26:14,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741901_1079 (size=5958) 2024-11-11T16:26:14,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741901_1079 (size=5958) 2024-11-11T16:26:14,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741901_1079 (size=5958) 2024-11-11T16:26:14,581 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/b/bf0d6b0f901d4d73b87a92c5ee36efec 2024-11-11T16:26:14,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741836_1012 (size=3561) 2024-11-11T16:26:14,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741833_1009 (size=1404) 2024-11-11T16:26:14,614 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/c/d3a53179862a4c6d909cad34c67af8a4 is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1731342373856/Put/seqid=0 2024-11-11T16:26:14,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741902_1080 (size=5958) 2024-11-11T16:26:14,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741902_1080 (size=5958) 2024-11-11T16:26:14,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741902_1080 (size=5958) 2024-11-11T16:26:14,630 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/c/d3a53179862a4c6d909cad34c67af8a4 2024-11-11T16:26:14,636 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/a/9a952c1a892a4fbc81d78767ccfc60af as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/a/9a952c1a892a4fbc81d78767ccfc60af 2024-11-11T16:26:14,642 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/a/9a952c1a892a4fbc81d78767ccfc60af, entries=10, sequenceid=33, filesize=5.8 K 2024-11-11T16:26:14,643 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/b/bf0d6b0f901d4d73b87a92c5ee36efec as 
hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/b/bf0d6b0f901d4d73b87a92c5ee36efec 2024-11-11T16:26:14,648 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/b/bf0d6b0f901d4d73b87a92c5ee36efec, entries=10, sequenceid=33, filesize=5.8 K 2024-11-11T16:26:14,649 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/c/d3a53179862a4c6d909cad34c67af8a4 as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/c/d3a53179862a4c6d909cad34c67af8a4 2024-11-11T16:26:14,655 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/c/d3a53179862a4c6d909cad34c67af8a4, entries=10, sequenceid=33, filesize=5.8 K 2024-11-11T16:26:14,657 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T16:26:14,658 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for d0f5b659a114aa26bddb48d8eb93699f in 729ms, sequenceid=33, compaction requested=false 2024-11-11T16:26:14,658 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for d0f5b659a114aa26bddb48d8eb93699f: 2024-11-11T16:26:14,659 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing d0f5b659a114aa26bddb48d8eb93699f, disabling compactions & flushes 2024-11-11T16:26:14,659 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f. 2024-11-11T16:26:14,659 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f. 2024-11-11T16:26:14,659 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f. after waiting 0 ms 2024-11-11T16:26:14,659 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f. 2024-11-11T16:26:14,678 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f. 
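[editor's note] The flush above writes roughly 2.55 KB spread over the three families into one ~5.8 K HFile per family (entries=10, sequenceid=33 each) before the region is closed. The test in the log drives this through HRegion directly; as a minimal sketch of the equivalent client-side flow, the following assumes a reachable cluster configured via hbase-site.xml, and the row key, qualifiers, and values only mirror the pattern visible in the log ("testReplayEditsWrittenViaHRegion/a:x0").

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class FlushSketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("testReplayEditsWrittenViaHRegion");
    // Assumption: connection settings come from hbase-site.xml on the classpath.
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      for (String family : new String[] { "a", "b", "c" }) {
        for (int i = 0; i < 10; i++) { // ten cells per family, matching entries=10 in the log
          Put put = new Put(Bytes.toBytes("testReplayEditsWrittenViaHRegion"));
          put.addColumn(Bytes.toBytes(family), Bytes.toBytes("x" + i), Bytes.toBytes("v" + i));
          table.put(put);
        }
      }
      admin.flush(name); // forces the memstores out, one HFile per column family
    }
  }
}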
2024-11-11T16:26:14,678 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for d0f5b659a114aa26bddb48d8eb93699f: Waiting for close lock at 1731342374659Disabling compacts and flushes for region at 1731342374659Disabling writes for close at 1731342374659Writing region close event to WAL at 1731342374678 (+19 ms)Closed at 1731342374678 2024-11-11T16:26:14,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741899_1077 (size=3386) 2024-11-11T16:26:14,709 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641 not finished, retry = 0 2024-11-11T16:26:14,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741899_1077 (size=3386) 2024-11-11T16:26:14,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741899_1077 (size=3386) 2024-11-11T16:26:14,814 DEBUG [Time-limited test {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/b/bf0d6b0f901d4d73b87a92c5ee36efec to hdfs://localhost:39605/hbase/archive/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/b/bf0d6b0f901d4d73b87a92c5ee36efec 2024-11-11T16:26:14,834 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641, size=3.3 K (3386bytes) 2024-11-11T16:26:14,834 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641 2024-11-11T16:26:14,834 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641 after 0ms 2024-11-11T16:26:14,837 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:14,837 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641 took 4ms 2024-11-11T16:26:14,841 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641 so closing down 2024-11-11T16:26:14,841 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:14,845 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731342373641.temp 2024-11-11T16:26:14,847 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer 
path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000003-wal.1731342373641.temp 2024-11-11T16:26:14,848 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:14,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741903_1081 (size=2944) 2024-11-11T16:26:14,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741903_1081 (size=2944) 2024-11-11T16:26:14,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741903_1081 (size=2944) 2024-11-11T16:26:14,860 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000003-wal.1731342373641.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-11-11T16:26:14,862 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000003-wal.1731342373641.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000032 2024-11-11T16:26:14,862 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 24 ms; skipped=2; WAL=hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641, size=3.3 K, length=3386, corrupted=false, cancelled=false 2024-11-11T16:26:14,862 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641, journal: Splitting hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641, size=3.3 K (3386bytes) at 1731342374834Finishing writing output for hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641 so closing down at 1731342374841 (+7 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000003-wal.1731342373641.temp at 1731342374847 (+6 ms)3 split writer threads finished at 1731342374848 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000003-wal.1731342373641.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1731342374861 (+13 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000003-wal.1731342373641.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000032 at 1731342374862 (+1 ms)Processed 32 edits across 1 Regions in 24 ms; skipped=2; WAL=hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641, 
size=3.3 K, length=3386, corrupted=false, cancelled=false at 1731342374862 2024-11-11T16:26:14,869 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342373641 to hdfs://localhost:39605/hbase/oldWALs/wal.1731342373641 2024-11-11T16:26:14,870 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000032 2024-11-11T16:26:14,870 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:14,872 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:14,894 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342374873, exclude list is [], retry=0 2024-11-11T16:26:14,897 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:14,898 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:14,898 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:14,909 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342374873 2024-11-11T16:26:14,909 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387)] 2024-11-11T16:26:14,909 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => d0f5b659a114aa26bddb48d8eb93699f, NAME => 'testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:14,910 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:14,910 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:14,910 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:14,913 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:14,914 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0f5b659a114aa26bddb48d8eb93699f columnFamilyName a 2024-11-11T16:26:14,914 DEBUG [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:14,922 DEBUG [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/a/9a952c1a892a4fbc81d78767ccfc60af 2024-11-11T16:26:14,922 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(327): Store=d0f5b659a114aa26bddb48d8eb93699f/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:14,922 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:14,924 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0f5b659a114aa26bddb48d8eb93699f columnFamilyName b 2024-11-11T16:26:14,924 DEBUG [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:14,925 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(327): Store=d0f5b659a114aa26bddb48d8eb93699f/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:14,925 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:14,926 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d0f5b659a114aa26bddb48d8eb93699f columnFamilyName c 2024-11-11T16:26:14,926 DEBUG [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:14,951 DEBUG [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/c/d3a53179862a4c6d909cad34c67af8a4 2024-11-11T16:26:14,951 INFO [StoreOpener-d0f5b659a114aa26bddb48d8eb93699f-1 {}] regionserver.HStore(327): Store=d0f5b659a114aa26bddb48d8eb93699f/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:14,951 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:14,952 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:14,954 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:14,954 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000032 2024-11-11T16:26:14,957 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000032: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:14,958 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 20, firstSequenceIdInLog=3, maxSequenceIdInLog=32, path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000032 2024-11-11T16:26:14,959 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing d0f5b659a114aa26bddb48d8eb93699f 3/3 column families, dataSize=870 B heapSize=2.31 KB 
2024-11-11T16:26:14,977 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/b/802debb4b53d49a487010afc32a8732d is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1731342373801/Put/seqid=0 2024-11-11T16:26:14,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741905_1083 (size=5958) 2024-11-11T16:26:14,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741905_1083 (size=5958) 2024-11-11T16:26:14,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741905_1083 (size=5958) 2024-11-11T16:26:14,989 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=32 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/b/802debb4b53d49a487010afc32a8732d 2024-11-11T16:26:15,009 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/.tmp/b/802debb4b53d49a487010afc32a8732d as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/b/802debb4b53d49a487010afc32a8732d 2024-11-11T16:26:15,031 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/b/802debb4b53d49a487010afc32a8732d, entries=10, sequenceid=32, filesize=5.8 K 2024-11-11T16:26:15,032 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for d0f5b659a114aa26bddb48d8eb93699f in 73ms, sequenceid=32, compaction requested=false; wal=null 2024-11-11T16:26:15,033 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/0000000000000000032 2024-11-11T16:26:15,045 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:15,045 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:15,048 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-11-11T16:26:15,052 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for d0f5b659a114aa26bddb48d8eb93699f 2024-11-11T16:26:15,069 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/d0f5b659a114aa26bddb48d8eb93699f/recovered.edits/33.seqid, newMaxSeqId=33, maxSeqId=1 2024-11-11T16:26:15,077 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened d0f5b659a114aa26bddb48d8eb93699f; next sequenceid=34; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59053157, jitterRate=-0.12003938853740692}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:15,078 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for d0f5b659a114aa26bddb48d8eb93699f: Writing region info on filesystem at 1731342374910Initializing all the Stores at 1731342374911 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342374911Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342374912 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342374912Obtaining lock to block concurrent updates at 1731342374959 (+47 ms)Preparing flush snapshotting stores in d0f5b659a114aa26bddb48d8eb93699f at 1731342374959Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f., syncing WAL and waiting on mvcc, flushsize=dataSize=870, getHeapSize=2320, getOffHeapSize=0, getCellsCount=10 at 1731342374959Flushing stores of testReplayEditsWrittenViaHRegion,,1731342373537.d0f5b659a114aa26bddb48d8eb93699f. 
at 1731342374959Flushing d0f5b659a114aa26bddb48d8eb93699f/b: creating writer at 1731342374959Flushing d0f5b659a114aa26bddb48d8eb93699f/b: appending metadata at 1731342374976 (+17 ms)Flushing d0f5b659a114aa26bddb48d8eb93699f/b: closing flushed file at 1731342374976Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37d5afd1: reopening flushed file at 1731342375007 (+31 ms)Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for d0f5b659a114aa26bddb48d8eb93699f in 73ms, sequenceid=32, compaction requested=false; wal=null at 1731342375032 (+25 ms)Cleaning up temporary data from old regions at 1731342375045 (+13 ms)Region opened successfully at 1731342375078 (+33 ms) 2024-11-11T16:26:15,116 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterPartialFlush Thread=411 (was 400) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:55044 [Waiting for operation #50] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:54054 [Waiting for operation #29] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-20-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:54274 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: java.util.concurrent.ThreadPoolExecutor$Worker@7434d5e4[State = -1, empty queue] java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:32898 [Waiting for operation #44] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741904_1082, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:55232 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:33116 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741904_1082] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1094 (was 1026) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=691 (was 691), ProcessCount=11 (was 11), AvailableMemoryMB=2243 (was 2296) 2024-11-11T16:26:15,116 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1094 is superior to 1024 2024-11-11T16:26:15,138 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=411, OpenFileDescriptor=1094, MaxFileDescriptor=1048576, SystemLoadAverage=691, ProcessCount=11, AvailableMemoryMB=2242 2024-11-11T16:26:15,138 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1094 is superior to 1024 2024-11-11T16:26:15,160 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:15,162 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:15,163 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:26:15,166 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-12526583, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/hregion-12526583, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:15,185 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-12526583/hregion-12526583.1731342375167, exclude list is [], retry=0 2024-11-11T16:26:15,188 DEBUG [AsyncFSWAL-22-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:15,189 DEBUG [AsyncFSWAL-22-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:15,190 DEBUG [AsyncFSWAL-22-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:15,233 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-12526583/hregion-12526583.1731342375167 2024-11-11T16:26:15,234 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387)] 2024-11-11T16:26:15,234 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => c8cc0f93157cf70c1af83cf952d59acf, NAME => 'testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsAfterAbortingFlush', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39605/hbase 2024-11-11T16:26:15,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741907_1085 (size=68) 2024-11-11T16:26:15,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741907_1085 (size=68) 2024-11-11T16:26:15,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741907_1085 (size=68) 2024-11-11T16:26:15,317 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:15,325 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,328 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8cc0f93157cf70c1af83cf952d59acf columnFamilyName a 2024-11-11T16:26:15,328 DEBUG [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:15,331 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(327): Store=c8cc0f93157cf70c1af83cf952d59acf/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:15,331 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,333 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8cc0f93157cf70c1af83cf952d59acf columnFamilyName b 2024-11-11T16:26:15,333 DEBUG [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:15,334 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(327): Store=c8cc0f93157cf70c1af83cf952d59acf/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:15,334 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,336 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8cc0f93157cf70c1af83cf952d59acf columnFamilyName c 2024-11-11T16:26:15,336 DEBUG [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:15,336 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(327): Store=c8cc0f93157cf70c1af83cf952d59acf/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:15,337 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,338 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,338 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,339 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,339 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,340 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:15,342 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,345 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:26:15,346 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened c8cc0f93157cf70c1af83cf952d59acf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62952330, jitterRate=-0.061937183141708374}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:15,347 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for c8cc0f93157cf70c1af83cf952d59acf: Writing region info on filesystem at 1731342375318Initializing all the Stores at 1731342375321 (+3 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342375321Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342375324 (+3 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342375324Cleaning up temporary data from old regions at 1731342375339 (+15 ms)Region opened successfully at 1731342375347 (+8 ms) 2024-11-11T16:26:15,347 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing c8cc0f93157cf70c1af83cf952d59acf, disabling compactions & flushes 2024-11-11T16:26:15,347 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 2024-11-11T16:26:15,347 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 2024-11-11T16:26:15,347 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 
after waiting 0 ms 2024-11-11T16:26:15,347 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 2024-11-11T16:26:15,349 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 2024-11-11T16:26:15,349 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for c8cc0f93157cf70c1af83cf952d59acf: Waiting for close lock at 1731342375347Disabling compacts and flushes for region at 1731342375347Disabling writes for close at 1731342375347Writing region close event to WAL at 1731342375349 (+2 ms)Closed at 1731342375349 2024-11-11T16:26:15,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741906_1084 (size=95) 2024-11-11T16:26:15,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741906_1084 (size=95) 2024-11-11T16:26:15,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741906_1084 (size=95) 2024-11-11T16:26:15,373 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:15,373 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-12526583:(num 1731342375167) 2024-11-11T16:26:15,373 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:15,377 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:15,398 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377, exclude list is [], retry=0 2024-11-11T16:26:15,401 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:15,402 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:15,402 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:15,408 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377 2024-11-11T16:26:15,414 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:26:15,501 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => c8cc0f93157cf70c1af83cf952d59acf, NAME => 'testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:15,504 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,504 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:15,505 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,505 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,509 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,511 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8cc0f93157cf70c1af83cf952d59acf columnFamilyName a 2024-11-11T16:26:15,511 DEBUG [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:15,513 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(327): Store=c8cc0f93157cf70c1af83cf952d59acf/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:15,513 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,515 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy 
for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8cc0f93157cf70c1af83cf952d59acf columnFamilyName b 2024-11-11T16:26:15,515 DEBUG [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:15,517 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(327): Store=c8cc0f93157cf70c1af83cf952d59acf/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:15,518 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,521 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8cc0f93157cf70c1af83cf952d59acf columnFamilyName c 2024-11-11T16:26:15,522 DEBUG [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:15,525 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(327): Store=c8cc0f93157cf70c1af83cf952d59acf/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:15,526 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,528 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,532 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,535 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,535 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,537 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:15,539 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,540 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened c8cc0f93157cf70c1af83cf952d59acf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70246236, jitterRate=0.04675048589706421}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:15,541 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:15,541 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for c8cc0f93157cf70c1af83cf952d59acf: Running coprocessor pre-open hook at 1731342375505Writing region info on filesystem at 1731342375505Initializing all the Stores at 1731342375507 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342375507Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342375507Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342375507Cleaning up temporary data from old regions at 1731342375535 (+28 ms)Running coprocessor post-open hooks at 1731342375541 (+6 ms)Region opened successfully at 1731342375541 2024-11-11T16:26:15,565 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing c8cc0f93157cf70c1af83cf952d59acf 3/3 column families, dataSize=590 B heapSize=2.08 KB 2024-11-11T16:26:15,569 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for c8cc0f93157cf70c1af83cf952d59acf/a, retrying num=0 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:16,570 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for c8cc0f93157cf70c1af83cf952d59acf/a, retrying num=1 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T16:26:17,571 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for c8cc0f93157cf70c1af83cf952d59acf/a, retrying num=2 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:17,869 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T16:26:18,571 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for c8cc0f93157cf70c1af83cf952d59acf/a, retrying num=3 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
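The warnings above show the same flush failing roughly once per second and being retried with an incrementing counter (retrying num=0 through num=9) before the flush attempt is finally abandoned. A minimal, self-contained Java sketch of that kind of bounded retry loop follows; it only illustrates the pattern visible in the log, not the actual HBase HStore flush code, and the names MAX_FLUSH_RETRIES, PAUSE_MS and tryFlush are made up for the example.

```java
import java.io.IOException;
import java.util.concurrent.TimeUnit;

public class BoundedFlushRetry {

    // Hypothetical limits mirroring what the log shows:
    // ten attempts (num=0..9), about one second apart.
    private static final int MAX_FLUSH_RETRIES = 10;
    private static final long PAUSE_MS = 1_000L;

    /** Stand-in for the real flush; here it always fails, like the test's simulated exception. */
    private static void tryFlush() throws IOException {
        throw new IOException("Simulated exception by tests");
    }

    public static boolean flushWithRetries() throws InterruptedException {
        for (int num = 0; num < MAX_FLUSH_RETRIES; num++) {
            try {
                tryFlush();
                return true; // flush succeeded, stop retrying
            } catch (IOException e) {
                // Matches the shape of the log line:
                // "Failed flushing store file ..., retrying num=<n>"
                System.out.println("Failed flushing store file, retrying num=" + num
                    + ": " + e.getMessage());
                TimeUnit.MILLISECONDS.sleep(PAUSE_MS);
            }
        }
        return false; // all attempts exhausted; the caller decides what happens next
    }

    public static void main(String[] args) throws InterruptedException {
        System.out.println("flush succeeded = " + flushWithRetries());
    }
}
```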
2024-11-11T16:26:19,398 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion 2024-11-11T16:26:19,398 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsWrittenViaHRegion Metrics about Tables on a single HBase RegionServer 2024-11-11T16:26:19,399 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush 2024-11-11T16:26:19,399 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testReplayEditsAfterAbortingFlush Metrics about Tables on a single HBase RegionServer 2024-11-11T16:26:19,572 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for c8cc0f93157cf70c1af83cf952d59acf/a, retrying num=4 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:20,573 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for c8cc0f93157cf70c1af83cf952d59acf/a, retrying num=5 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:21,006 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T16:26:21,574 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for c8cc0f93157cf70c1af83cf952d59acf/a, retrying num=6 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T16:26:22,574 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for c8cc0f93157cf70c1af83cf952d59acf/a, retrying num=7 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:23,575 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for c8cc0f93157cf70c1af83cf952d59acf/a, retrying num=8 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:24,576 WARN [Time-limited test {}] regionserver.HStore(850): Failed flushing store file for c8cc0f93157cf70c1af83cf952d59acf/a, retrying num=9 java.io.IOException: Simulated exception by tests at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay$CustomStoreFlusher.flushSnapshot(AbstractTestWALReplay.java:619) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.flushCache(HStore.java:832) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl.flushCache(HStore.java:1975) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushCacheAndCommit(HRegion.java:3029) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2737) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2579) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flushcache(HRegion.java:2502) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.flush(HRegion.java:2472) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.testReplayEditsAfterAbortingFlush(AbstractTestWALReplay.java:668) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:24,577 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c8cc0f93157cf70c1af83cf952d59acf: 2024-11-11T16:26:24,577 INFO [Time-limited test {}] wal.AbstractTestWALReplay(671): Expected simulated exception when flushing region, region: testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 2024-11-11T16:26:24,588 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c8cc0f93157cf70c1af83cf952d59acf: 2024-11-11T16:26:24,589 INFO [Time-limited test {}] wal.AbstractTestWALReplay(691): Expected exception when flushing region because server is stopped,Aborting flush because server is aborted... 
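The test drives these failures through a store flusher that deliberately throws (the "Simulated exception by tests" seen in every stack trace above), so it can then verify that the unflushed edits are still recoverable by splitting and replaying the WAL. A hedged sketch of that fault-injection idea in plain Java is below; the Flusher interface and FaultInjectingFlusher class are hypothetical stand-ins for illustration only, not the actual CustomStoreFlusher from AbstractTestWALReplay.

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

/** Hypothetical flush abstraction used only for this illustration. */
interface Flusher {
    void flushSnapshot() throws IOException;
}

/** Throws on every flush while the switch is on, mimicking the test's simulated failure. */
class FaultInjectingFlusher implements Flusher {
    static final AtomicBoolean throwExceptionWhenFlushing = new AtomicBoolean(true);

    @Override
    public void flushSnapshot() throws IOException {
        if (throwExceptionWhenFlushing.get()) {
            throw new IOException("Simulated exception by tests");
        }
        // Real flushing work would happen here once the fault switch is turned off.
    }
}

public class FaultInjectionDemo {
    public static void main(String[] args) {
        Flusher flusher = new FaultInjectingFlusher();
        try {
            flusher.flushSnapshot();
        } catch (IOException expected) {
            // The surrounding test expects this failure and later checks that the
            // memstore contents can still be recovered by replaying the WAL.
            System.out.println("Expected simulated exception: " + expected.getMessage());
        }
    }
}
```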
2024-11-11T16:26:24,589 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing c8cc0f93157cf70c1af83cf952d59acf, disabling compactions & flushes 2024-11-11T16:26:24,589 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 2024-11-11T16:26:24,589 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 2024-11-11T16:26:24,589 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. after waiting 0 ms 2024-11-11T16:26:24,589 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 2024-11-11T16:26:24,589 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1190 in region testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 2024-11-11T16:26:24,589 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 2024-11-11T16:26:24,589 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for c8cc0f93157cf70c1af83cf952d59acf: Waiting for close lock at 1731342384589Running coprocessor pre-close hooks at 1731342384589Disabling compacts and flushes for region at 1731342384589Disabling writes for close at 1731342384589Writing region close event to WAL at 1731342384589Running coprocessor post-close hooks at 1731342384589Closed at 1731342384589 2024-11-11T16:26:24,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741908_1086 (size=2691) 2024-11-11T16:26:24,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741908_1086 (size=2691) 2024-11-11T16:26:24,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741908_1086 (size=2691) 2024-11-11T16:26:24,610 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377, size=2.6 K (2691bytes) 2024-11-11T16:26:24,610 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377 2024-11-11T16:26:24,611 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377 after 1ms 2024-11-11T16:26:24,614 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:24,614 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377 took 4ms 2024-11-11T16:26:24,617 DEBUG 
[Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377 so closing down 2024-11-11T16:26:24,617 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:24,618 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000004-wal.1731342375377.temp 2024-11-11T16:26:24,619 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000004-wal.1731342375377.temp 2024-11-11T16:26:24,620 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:24,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741909_1087 (size=2094) 2024-11-11T16:26:24,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741909_1087 (size=2094) 2024-11-11T16:26:24,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741909_1087 (size=2094) 2024-11-11T16:26:24,654 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000004-wal.1731342375377.temp (wrote 20 edits, skipped 0 edits in 0 ms) 2024-11-11T16:26:24,656 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000004-wal.1731342375377.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000026 2024-11-11T16:26:24,657 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 23 edits across 1 Regions in 41 ms; skipped=3; WAL=hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377, size=2.6 K, length=2691, corrupted=false, cancelled=false 2024-11-11T16:26:24,657 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377, journal: Splitting hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377, size=2.6 K (2691bytes) at 1731342384610Finishing writing output for hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377 so closing down at 1731342384617 (+7 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000004-wal.1731342375377.temp at 1731342384619 (+2 ms)3 split writer threads finished at 1731342384620 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000004-wal.1731342375377.temp (wrote 20 edits, skipped 0 edits in 0 ms) at 1731342384654 (+34 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000004-wal.1731342375377.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000026 at 1731342384656 (+2 ms)Processed 23 edits across 1 Regions in 41 ms; skipped=3; WAL=hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377, size=2.6 K, length=2691, corrupted=false, cancelled=false at 1731342384657 (+1 ms) 2024-11-11T16:26:24,659 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342375377 to hdfs://localhost:39605/hbase/oldWALs/wal.1731342375377 2024-11-11T16:26:24,660 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000026 2024-11-11T16:26:24,660 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:24,662 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:24,683 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342384663, exclude list is [], retry=0 2024-11-11T16:26:24,687 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:24,687 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:24,687 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:24,689 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342384663 2024-11-11T16:26:24,689 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:26:24,690 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => c8cc0f93157cf70c1af83cf952d59acf, NAME => 
'testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:24,691 DEBUG [Time-limited test {}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testReplayEditsAfterAbortingFlush c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,691 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:24,691 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,691 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,693 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,694 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8cc0f93157cf70c1af83cf952d59acf columnFamilyName a 2024-11-11T16:26:24,694 DEBUG [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:24,695 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(327): Store=c8cc0f93157cf70c1af83cf952d59acf/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:24,695 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,696 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8cc0f93157cf70c1af83cf952d59acf columnFamilyName b 2024-11-11T16:26:24,696 DEBUG [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:24,696 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(327): Store=c8cc0f93157cf70c1af83cf952d59acf/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:24,696 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,697 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8cc0f93157cf70c1af83cf952d59acf columnFamilyName c 2024-11-11T16:26:24,697 DEBUG [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:24,697 INFO [StoreOpener-c8cc0f93157cf70c1af83cf952d59acf-1 {}] regionserver.HStore(327): Store=c8cc0f93157cf70c1af83cf952d59acf/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:24,698 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,698 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,700 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,701 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000026 2024-11-11T16:26:24,703 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000026: isRecoveredEdits=true, 
hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:24,705 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 0, firstSequenceIdInLog=4, maxSequenceIdInLog=26, path=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000026 2024-11-11T16:26:24,705 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing c8cc0f93157cf70c1af83cf952d59acf 3/3 column families, dataSize=1.16 KB heapSize=3.41 KB 2024-11-11T16:26:24,725 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/.tmp/a/a3fbf7e1f1d548cebe46651c8b6b69f1 is 64, key is testReplayEditsAfterAbortingFlush12/a:q/1731342384581/Put/seqid=0 2024-11-11T16:26:24,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741911_1089 (size=5523) 2024-11-11T16:26:24,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741911_1089 (size=5523) 2024-11-11T16:26:24,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741911_1089 (size=5523) 2024-11-11T16:26:24,736 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=416 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/.tmp/a/a3fbf7e1f1d548cebe46651c8b6b69f1 2024-11-11T16:26:24,764 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/.tmp/b/96e567e53b674e82af4584b950d8b0fe is 64, key is testReplayEditsAfterAbortingFlush10/b:q/1731342384577/Put/seqid=0 2024-11-11T16:26:24,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741912_1090 (size=5524) 2024-11-11T16:26:24,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741912_1090 (size=5524) 2024-11-11T16:26:24,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741912_1090 (size=5524) 2024-11-11T16:26:24,774 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=417 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/.tmp/b/96e567e53b674e82af4584b950d8b0fe 2024-11-11T16:26:24,798 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/.tmp/c/ef236f99b85d474e9388f85d36d3ca2d is 64, key is testReplayEditsAfterAbortingFlush11/c:q/1731342384578/Put/seqid=0 2024-11-11T16:26:24,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741913_1091 (size=5457) 2024-11-11T16:26:24,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is 
added to blk_1073741913_1091 (size=5457) 2024-11-11T16:26:24,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741913_1091 (size=5457) 2024-11-11T16:26:24,808 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=357 B at sequenceid=26 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/.tmp/c/ef236f99b85d474e9388f85d36d3ca2d 2024-11-11T16:26:24,814 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/.tmp/a/a3fbf7e1f1d548cebe46651c8b6b69f1 as hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/a/a3fbf7e1f1d548cebe46651c8b6b69f1 2024-11-11T16:26:24,819 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/a/a3fbf7e1f1d548cebe46651c8b6b69f1, entries=7, sequenceid=26, filesize=5.4 K 2024-11-11T16:26:24,820 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/.tmp/b/96e567e53b674e82af4584b950d8b0fe as hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/b/96e567e53b674e82af4584b950d8b0fe 2024-11-11T16:26:24,825 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/b/96e567e53b674e82af4584b950d8b0fe, entries=7, sequenceid=26, filesize=5.4 K 2024-11-11T16:26:24,826 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/.tmp/c/ef236f99b85d474e9388f85d36d3ca2d as hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/c/ef236f99b85d474e9388f85d36d3ca2d 2024-11-11T16:26:24,832 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/c/ef236f99b85d474e9388f85d36d3ca2d, entries=6, sequenceid=26, filesize=5.3 K 2024-11-11T16:26:24,833 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for c8cc0f93157cf70c1af83cf952d59acf in 128ms, sequenceid=26, compaction requested=false; wal=null 2024-11-11T16:26:24,834 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/0000000000000000026 2024-11-11T16:26:24,835 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,835 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,836 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
testReplayEditsAfterAbortingFlush descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:24,837 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,840 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsAfterAbortingFlush/c8cc0f93157cf70c1af83cf952d59acf/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=1 2024-11-11T16:26:24,841 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened c8cc0f93157cf70c1af83cf952d59acf; next sequenceid=27; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60250674, jitterRate=-0.10219499468803406}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:24,841 DEBUG [Time-limited test {}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c8cc0f93157cf70c1af83cf952d59acf 2024-11-11T16:26:24,842 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for c8cc0f93157cf70c1af83cf952d59acf: Running coprocessor pre-open hook at 1731342384691Writing region info on filesystem at 1731342384691Initializing all the Stores at 1731342384692 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342384692Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342384693 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342384693Obtaining lock to block concurrent updates at 1731342384705 (+12 ms)Preparing flush snapshotting stores in c8cc0f93157cf70c1af83cf952d59acf at 1731342384705Finished memstore snapshotting testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf., syncing WAL and waiting on mvcc, flushsize=dataSize=1190, getHeapSize=3440, getOffHeapSize=0, getCellsCount=20 at 1731342384705Flushing stores of testReplayEditsAfterAbortingFlush,,1731342375160.c8cc0f93157cf70c1af83cf952d59acf. 
at 1731342384705Flushing c8cc0f93157cf70c1af83cf952d59acf/a: creating writer at 1731342384705Flushing c8cc0f93157cf70c1af83cf952d59acf/a: appending metadata at 1731342384725 (+20 ms)Flushing c8cc0f93157cf70c1af83cf952d59acf/a: closing flushed file at 1731342384725Flushing c8cc0f93157cf70c1af83cf952d59acf/b: creating writer at 1731342384742 (+17 ms)Flushing c8cc0f93157cf70c1af83cf952d59acf/b: appending metadata at 1731342384763 (+21 ms)Flushing c8cc0f93157cf70c1af83cf952d59acf/b: closing flushed file at 1731342384763Flushing c8cc0f93157cf70c1af83cf952d59acf/c: creating writer at 1731342384780 (+17 ms)Flushing c8cc0f93157cf70c1af83cf952d59acf/c: appending metadata at 1731342384797 (+17 ms)Flushing c8cc0f93157cf70c1af83cf952d59acf/c: closing flushed file at 1731342384798 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b67ad7d: reopening flushed file at 1731342384813 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19585705: reopening flushed file at 1731342384819 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c2f19c4: reopening flushed file at 1731342384826 (+7 ms)Finished flush of dataSize ~1.16 KB/1190, heapSize ~3.36 KB/3440, currentSize=0 B/0 for c8cc0f93157cf70c1af83cf952d59acf in 128ms, sequenceid=26, compaction requested=false; wal=null at 1731342384833 (+7 ms)Cleaning up temporary data from old regions at 1731342384835 (+2 ms)Running coprocessor post-open hooks at 1731342384841 (+6 ms)Region opened successfully at 1731342384842 (+1 ms) 2024-11-11T16:26:24,868 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsAfterAbortingFlush Thread=415 (was 411) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:45458 [Waiting for operation #15] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:55086 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741910_1088, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: 
BP-1916425677-172.17.0.2-1731342345074:blk_1073741910_1088, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-22-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:55042 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741910_1088] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741910_1088, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:59900 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:45440 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741910_1088] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:59904 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741910_1088] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1168 (was 1094) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=658 (was 691), ProcessCount=11 (was 11), AvailableMemoryMB=2093 (was 2242) 2024-11-11T16:26:24,868 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1168 is superior to 1024 2024-11-11T16:26:24,881 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=415, OpenFileDescriptor=1168, MaxFileDescriptor=1048576, SystemLoadAverage=658, ProcessCount=11, AvailableMemoryMB=2093 2024-11-11T16:26:24,882 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1168 is superior to 1024 2024-11-11T16:26:24,898 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:24,899 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:24,900 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:26:24,903 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-65840671, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/hregion-65840671, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:24,916 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-65840671/hregion-65840671.1731342384903, exclude list is [], retry=0 2024-11-11T16:26:24,919 DEBUG [AsyncFSWAL-24-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:24,919 DEBUG [AsyncFSWAL-24-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:24,920 DEBUG [AsyncFSWAL-24-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:24,923 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-65840671/hregion-65840671.1731342384903 2024-11-11T16:26:24,924 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:40387:40387)] 2024-11-11T16:26:24,925 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 009512a45eb021980476ebcd93eb9398, NAME => 'testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testDatalossWhenInputError', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39605/hbase 2024-11-11T16:26:24,940 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741915_1093 (size=61) 2024-11-11T16:26:24,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741915_1093 (size=61) 2024-11-11T16:26:24,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741915_1093 (size=61) 2024-11-11T16:26:24,943 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:24,944 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,946 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 009512a45eb021980476ebcd93eb9398 columnFamilyName a 2024-11-11T16:26:24,946 DEBUG [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:24,946 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] regionserver.HStore(327): Store=009512a45eb021980476ebcd93eb9398/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:24,947 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,947 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,948 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,948 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,948 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,950 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,960 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:26:24,960 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 009512a45eb021980476ebcd93eb9398; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67778393, jitterRate=0.009976759552955627}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T16:26:24,961 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 009512a45eb021980476ebcd93eb9398: Writing region info on filesystem at 1731342384943Initializing all the Stores at 1731342384944 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342384944Cleaning up temporary data from old regions at 1731342384948 (+4 ms)Region opened successfully at 1731342384961 (+13 ms) 2024-11-11T16:26:24,961 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 009512a45eb021980476ebcd93eb9398, disabling compactions & flushes 2024-11-11T16:26:24,961 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. 2024-11-11T16:26:24,961 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. 2024-11-11T16:26:24,961 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. after waiting 0 ms 2024-11-11T16:26:24,961 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. 2024-11-11T16:26:24,962 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. 
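A note on the split-policy figures in the "Opened ... next sequenceid=..." entries: each ConstantSizeRegionSplitPolicy line reports a desiredMaxFileSize together with a jitterRate, and in every case above and below, the two numbers are consistent with a base size of 67,108,864 bytes (64 MiB) scaled by (1 + jitterRate). The 64 MiB base is an inference from the arithmetic, not something the log states; the sketch below only checks that the logged pairs agree with each other under that assumption.

// SplitSizeJitterCheck.java - a minimal consistency check, assuming a 64 MiB base
// region max file size (hypothetical; derived from the logged numbers, not from config).
public class SplitSizeJitterCheck {
    public static void main(String[] args) {
        long assumedBase = 64L * 1024 * 1024;               // 67,108,864 bytes (assumption)
        double[] jitterRates = { 0.009976759552955627,      // logged with 67778393
                                 -0.10219499468803406,      // logged with 60250674
                                 -0.05375559628009796,      // logged with 63501387
                                 -0.054968371987342834 };   // logged with 63419999
        long[] loggedSizes = { 67778393L, 60250674L, 63501387L, 63419999L };
        for (int i = 0; i < jitterRates.length; i++) {
            // desiredMaxFileSize appears to equal base * (1 + jitterRate)
            long expected = Math.round(assumedBase * (1.0 + jitterRates[i]));
            System.out.printf("jitterRate=%+.15f -> expected %d, logged %d%n",
                    jitterRates[i], expected, loggedSizes[i]);
        }
    }
}

Each pair matches to within a byte of rounding, which suggests the test runs with a 64 MiB region max file size; the exact way HBase applies the jitter is not shown in this log, so treat the formula as an observation about these numbers rather than a statement about the implementation.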
2024-11-11T16:26:24,962 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 009512a45eb021980476ebcd93eb9398: Waiting for close lock at 1731342384961Disabling compacts and flushes for region at 1731342384961Disabling writes for close at 1731342384961Writing region close event to WAL at 1731342384962 (+1 ms)Closed at 1731342384962 2024-11-11T16:26:24,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741914_1092 (size=95) 2024-11-11T16:26:24,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741914_1092 (size=95) 2024-11-11T16:26:24,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741914_1092 (size=95) 2024-11-11T16:26:24,967 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:24,967 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-65840671:(num 1731342384903) 2024-11-11T16:26:24,967 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:24,970 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:24,984 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970, exclude list is [], retry=0 2024-11-11T16:26:24,987 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:24,987 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:24,987 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:24,989 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970 2024-11-11T16:26:24,989 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969)] 2024-11-11T16:26:24,989 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 009512a45eb021980476ebcd93eb9398, NAME => 'testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:24,989 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:24,990 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,990 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,991 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,992 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 009512a45eb021980476ebcd93eb9398 columnFamilyName a 2024-11-11T16:26:24,992 DEBUG [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:24,992 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] regionserver.HStore(327): Store=009512a45eb021980476ebcd93eb9398/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:24,993 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,993 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,994 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,994 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,994 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,996 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:24,997 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 009512a45eb021980476ebcd93eb9398; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63501387, jitterRate=-0.05375559628009796}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T16:26:24,997 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 
009512a45eb021980476ebcd93eb9398: Writing region info on filesystem at 1731342384990Initializing all the Stores at 1731342384991 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342384991Cleaning up temporary data from old regions at 1731342384994 (+3 ms)Region opened successfully at 1731342384997 (+3 ms) 2024-11-11T16:26:25,006 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 009512a45eb021980476ebcd93eb9398, disabling compactions & flushes 2024-11-11T16:26:25,006 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. 2024-11-11T16:26:25,007 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. 2024-11-11T16:26:25,007 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. after waiting 0 ms 2024-11-11T16:26:25,007 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. 2024-11-11T16:26:25,007 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 750 in region testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. 2024-11-11T16:26:25,007 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. 
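At this point the second open of 009512a45eb021980476ebcd93eb9398 has been closed with 750 bytes still in the memstore (the ERROR line above), so the only durable copy of those ten edits is the WAL, wal.1731342384970. The split and replay that follow recover them (Applied 10, firstSequenceIdInLog=3, maxSequenceIdInLog=12), and the recovered-edits file is named by zero-padding the highest sequence id to 19 digits, i.e. String.format("%019d", 12L) gives the 0000000000000000012 seen below. The test drives the HRegion directly, but the writes have the shape of ordinary client puts against row "testDatalossWhenInputError", family a; a client-API sketch of equivalent writes follows, with the qualifier range x0..x9 and the cell values being assumptions (the log only shows ten edits and the single key a:x0).

// EquivalentPuts.java - hypothetical client-side writes with the same shape as the
// edits recovered below; not the test's own code.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class EquivalentPuts {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testDatalossWhenInputError"))) {
            byte[] row = Bytes.toBytes("testDatalossWhenInputError");
            byte[] family = Bytes.toBytes("a");
            // Ten small cells in one family; qualifiers x0..x9 and values are assumed.
            for (int i = 0; i < 10; i++) {
                Put put = new Put(row);
                put.addColumn(family, Bytes.toBytes("x" + i), Bytes.toBytes("v" + i));
                table.put(put);
            }
        }
    }
}

Replaying the recovered edits on the next open and flushing them, as the log does below, is what turns the 750 B of lost memstore data into the 5.7 K HFile b66263e4cb784f1d9bcfb0465597647a.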
2024-11-11T16:26:25,007 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 009512a45eb021980476ebcd93eb9398: Waiting for close lock at 1731342385006Disabling compacts and flushes for region at 1731342385006Disabling writes for close at 1731342385007 (+1 ms)Writing region close event to WAL at 1731342385007Closed at 1731342385007 2024-11-11T16:26:25,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741916_1094 (size=1050) 2024-11-11T16:26:25,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741916_1094 (size=1050) 2024-11-11T16:26:25,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741916_1094 (size=1050) 2024-11-11T16:26:25,025 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970, size=1.0 K (1050bytes) 2024-11-11T16:26:25,025 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970 2024-11-11T16:26:25,026 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970 after 1ms 2024-11-11T16:26:25,028 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:25,029 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970 took 3ms 2024-11-11T16:26:25,030 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970 so closing down 2024-11-11T16:26:25,030 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:25,032 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731342384970.temp 2024-11-11T16:26:25,035 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000003-wal.1731342384970.temp 2024-11-11T16:26:25,036 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:25,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741917_1095 (size=1050) 2024-11-11T16:26:25,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741917_1095 (size=1050) 2024-11-11T16:26:25,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741917_1095 (size=1050) 2024-11-11T16:26:25,055 INFO 
[split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000003-wal.1731342384970.temp (wrote 10 edits, skipped 0 edits in 0 ms) 2024-11-11T16:26:25,057 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000003-wal.1731342384970.temp to hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000012 2024-11-11T16:26:25,057 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 10 edits across 1 Regions in 28 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970, size=1.0 K, length=1050, corrupted=false, cancelled=false 2024-11-11T16:26:25,057 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970, journal: Splitting hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970, size=1.0 K (1050bytes) at 1731342385025Finishing writing output for hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970 so closing down at 1731342385030 (+5 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000003-wal.1731342384970.temp at 1731342385035 (+5 ms)3 split writer threads finished at 1731342385036 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000003-wal.1731342384970.temp (wrote 10 edits, skipped 0 edits in 0 ms) at 1731342385055 (+19 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000003-wal.1731342384970.temp to hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000012 at 1731342385057 (+2 ms)Processed 10 edits across 1 Regions in 28 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970, size=1.0 K, length=1050, corrupted=false, cancelled=false at 1731342385057 2024-11-11T16:26:25,059 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342384970 to hdfs://localhost:39605/hbase/oldWALs/wal.1731342384970 2024-11-11T16:26:25,060 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000012 2024-11-11T16:26:25,063 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, 
valueCompressionType=GZ 2024-11-11T16:26:25,391 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:25,393 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:25,407 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342385394, exclude list is [], retry=0 2024-11-11T16:26:25,409 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:25,410 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:25,410 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:25,411 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342385394 2024-11-11T16:26:25,412 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969)] 2024-11-11T16:26:25,412 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 009512a45eb021980476ebcd93eb9398, NAME => 'testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:25,412 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:25,412 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,412 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,415 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,416 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 009512a45eb021980476ebcd93eb9398 columnFamilyName a 2024-11-11T16:26:25,416 DEBUG [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:25,417 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] regionserver.HStore(327): Store=009512a45eb021980476ebcd93eb9398/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:25,417 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,418 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,419 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,420 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000012 2024-11-11T16:26:25,421 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000012: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:25,423 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 10, skipped 0, firstSequenceIdInLog=3, maxSequenceIdInLog=12, path=hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000012 2024-11-11T16:26:25,423 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 009512a45eb021980476ebcd93eb9398 1/1 column families, dataSize=750 B heapSize=1.73 KB 2024-11-11T16:26:25,440 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/.tmp/a/b66263e4cb784f1d9bcfb0465597647a is 79, key is testDatalossWhenInputError/a:x0/1731342384997/Put/seqid=0 2024-11-11T16:26:25,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741919_1097 (size=5808) 2024-11-11T16:26:25,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741919_1097 (size=5808) 2024-11-11T16:26:25,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741919_1097 (size=5808) 2024-11-11T16:26:25,448 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750 B at 
sequenceid=12 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/.tmp/a/b66263e4cb784f1d9bcfb0465597647a 2024-11-11T16:26:25,460 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/.tmp/a/b66263e4cb784f1d9bcfb0465597647a as hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/a/b66263e4cb784f1d9bcfb0465597647a 2024-11-11T16:26:25,470 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/a/b66263e4cb784f1d9bcfb0465597647a, entries=10, sequenceid=12, filesize=5.7 K 2024-11-11T16:26:25,471 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 009512a45eb021980476ebcd93eb9398 in 47ms, sequenceid=12, compaction requested=false; wal=null 2024-11-11T16:26:25,471 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/0000000000000000012 2024-11-11T16:26:25,472 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,472 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,475 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,477 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=1 2024-11-11T16:26:25,478 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 009512a45eb021980476ebcd93eb9398; next sequenceid=13; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63419999, jitterRate=-0.054968371987342834}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T16:26:25,479 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 009512a45eb021980476ebcd93eb9398: Writing region info on filesystem at 1731342385412Initializing all the Stores at 1731342385414 (+2 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342385414Obtaining lock to block concurrent updates at 1731342385423 (+9 ms)Preparing flush snapshotting stores in 009512a45eb021980476ebcd93eb9398 at 1731342385423Finished memstore snapshotting testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398., syncing WAL and waiting on mvcc, flushsize=dataSize=750, getHeapSize=1760, getOffHeapSize=0, getCellsCount=10 at 1731342385423Flushing stores of testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398. 
at 1731342385423Flushing 009512a45eb021980476ebcd93eb9398/a: creating writer at 1731342385423Flushing 009512a45eb021980476ebcd93eb9398/a: appending metadata at 1731342385440 (+17 ms)Flushing 009512a45eb021980476ebcd93eb9398/a: closing flushed file at 1731342385440Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ba559e: reopening flushed file at 1731342385458 (+18 ms)Finished flush of dataSize ~750 B/750, heapSize ~1.72 KB/1760, currentSize=0 B/0 for 009512a45eb021980476ebcd93eb9398 in 47ms, sequenceid=12, compaction requested=false; wal=null at 1731342385471 (+13 ms)Cleaning up temporary data from old regions at 1731342385472 (+1 ms)Region opened successfully at 1731342385479 (+7 ms) 2024-11-11T16:26:25,482 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 009512a45eb021980476ebcd93eb9398, NAME => 'testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:25,482 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testDatalossWhenInputError,,1731342384898.009512a45eb021980476ebcd93eb9398.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:25,482 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,482 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,483 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,484 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 009512a45eb021980476ebcd93eb9398 columnFamilyName a 2024-11-11T16:26:25,484 DEBUG [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:25,489 DEBUG [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/a/b66263e4cb784f1d9bcfb0465597647a 2024-11-11T16:26:25,489 INFO [StoreOpener-009512a45eb021980476ebcd93eb9398-1 {}] regionserver.HStore(327): Store=009512a45eb021980476ebcd93eb9398/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:25,489 DEBUG [Time-limited test {}] 
regionserver.HRegion(1038): replaying wal for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,490 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,491 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,491 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,491 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,493 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 009512a45eb021980476ebcd93eb9398 2024-11-11T16:26:25,495 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testDatalossWhenInputError/009512a45eb021980476ebcd93eb9398/recovered.edits/13.seqid, newMaxSeqId=13, maxSeqId=12 2024-11-11T16:26:25,496 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 009512a45eb021980476ebcd93eb9398; next sequenceid=14; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59446391, jitterRate=-0.11417974531650543}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T16:26:25,496 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 009512a45eb021980476ebcd93eb9398: Writing region info on filesystem at 1731342385482Initializing all the Stores at 1731342385483 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342385483Cleaning up temporary data from old regions at 1731342385491 (+8 ms)Region opened successfully at 1731342385496 (+5 ms) 2024-11-11T16:26:25,512 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testDatalossWhenInputError Thread=425 (was 415) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:45458 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:55144 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:45530 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:55086 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: PacketResponder: BP-1916425677-172.17.0.2-1731342345074:blk_1073741918_1096, type=LAST_IN_PIPELINE java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.waitForAckHead(BlockReceiver.java:1367) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1439) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:59900 [Waiting for operation #14] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:59964 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741918_1096] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/java.io.DataInputStream.read(DataInputStream.java:151) app//org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) app//org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) app//org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-24-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1250 (was 1168) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=654 (was 658), ProcessCount=11 (was 11), AvailableMemoryMB=2084 (was 2093) 2024-11-11T16:26:25,513 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1250 is superior to 1024 2024-11-11T16:26:25,525 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=425, OpenFileDescriptor=1250, MaxFileDescriptor=1048576, SystemLoadAverage=654, ProcessCount=11, AvailableMemoryMB=2083 2024-11-11T16:26:25,525 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1250 is superior to 1024 2024-11-11T16:26:25,540 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:25,542 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:25,542 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:26:25,545 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-44550959, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/hregion-44550959, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:25,558 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-44550959/hregion-44550959.1731342385545, exclude list is [], retry=0 2024-11-11T16:26:25,561 DEBUG [AsyncFSWAL-26-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:25,561 DEBUG [AsyncFSWAL-26-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:25,561 DEBUG [AsyncFSWAL-26-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:25,563 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-44550959/hregion-44550959.1731342385545 2024-11-11T16:26:25,564 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969)] 2024-11-11T16:26:25,564 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 06e89e2a14be38bd767bb1d1f132acc8, NAME => 'testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testCompactedBulkLoadedFiles', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39605/hbase 2024-11-11T16:26:25,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741921_1099 (size=63) 2024-11-11T16:26:25,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741921_1099 (size=63) 2024-11-11T16:26:25,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741921_1099 (size=63) 2024-11-11T16:26:25,573 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:25,574 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,575 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 06e89e2a14be38bd767bb1d1f132acc8 columnFamilyName a 2024-11-11T16:26:25,575 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:25,576 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(327): Store=06e89e2a14be38bd767bb1d1f132acc8/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:25,576 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,577 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 06e89e2a14be38bd767bb1d1f132acc8 columnFamilyName b 2024-11-11T16:26:25,577 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:25,577 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(327): Store=06e89e2a14be38bd767bb1d1f132acc8/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:25,578 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,579 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 06e89e2a14be38bd767bb1d1f132acc8 columnFamilyName c 2024-11-11T16:26:25,579 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:25,579 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(327): Store=06e89e2a14be38bd767bb1d1f132acc8/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:25,579 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,580 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,580 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,582 DEBUG [Time-limited 
test {}] regionserver.HRegion(1048): stopping wal replay for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,582 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,582 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:25,584 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,586 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:26:25,587 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 06e89e2a14be38bd767bb1d1f132acc8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68273377, jitterRate=0.017352595925331116}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:25,587 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 06e89e2a14be38bd767bb1d1f132acc8: Writing region info on filesystem at 1731342385573Initializing all the Stores at 1731342385573Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342385573Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342385574 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342385574Cleaning up temporary data from old regions at 1731342385582 (+8 ms)Region opened successfully at 1731342385587 (+5 ms) 2024-11-11T16:26:25,587 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 06e89e2a14be38bd767bb1d1f132acc8, disabling compactions & flushes 2024-11-11T16:26:25,587 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8. 2024-11-11T16:26:25,587 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8. 2024-11-11T16:26:25,587 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8. 
after waiting 0 ms 2024-11-11T16:26:25,587 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8. 2024-11-11T16:26:25,588 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8. 2024-11-11T16:26:25,588 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 06e89e2a14be38bd767bb1d1f132acc8: Waiting for close lock at 1731342385587Disabling compacts and flushes for region at 1731342385587Disabling writes for close at 1731342385587Writing region close event to WAL at 1731342385588 (+1 ms)Closed at 1731342385588 2024-11-11T16:26:25,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741920_1098 (size=95) 2024-11-11T16:26:25,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741920_1098 (size=95) 2024-11-11T16:26:25,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741920_1098 (size=95) 2024-11-11T16:26:25,592 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:25,593 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-44550959:(num 1731342385545) 2024-11-11T16:26:25,593 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:25,594 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:25,608 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595, exclude list is [], retry=0 2024-11-11T16:26:25,610 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:25,610 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:25,611 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:25,612 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595 2024-11-11T16:26:25,612 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:40387:40387)] 2024-11-11T16:26:25,612 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: 
{ENCODED => 06e89e2a14be38bd767bb1d1f132acc8, NAME => 'testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:25,613 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:25,613 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,613 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,614 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,615 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 06e89e2a14be38bd767bb1d1f132acc8 columnFamilyName a 2024-11-11T16:26:25,615 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:25,615 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(327): Store=06e89e2a14be38bd767bb1d1f132acc8/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:25,615 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,616 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
06e89e2a14be38bd767bb1d1f132acc8 columnFamilyName b 2024-11-11T16:26:25,616 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:25,616 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(327): Store=06e89e2a14be38bd767bb1d1f132acc8/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:25,616 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,617 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 06e89e2a14be38bd767bb1d1f132acc8 columnFamilyName c 2024-11-11T16:26:25,617 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:25,618 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(327): Store=06e89e2a14be38bd767bb1d1f132acc8/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:25,618 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,618 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,619 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,620 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,620 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,621 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
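[Editor's note] The region-open journal above shows the testCompactedBulkLoadedFiles region being created with three column families (a, b, c), each with VERSIONS=1 and ROW bloom filters, and FlushLargeStoresPolicy falling back to memstore-flush-size divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. Below is a minimal client-side sketch of declaring a comparable table; the lower-bound value is illustrative only, and the class and variable names are not taken from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateBulkLoadTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("testCompactedBulkLoadedFiles");
    // Table-level override for the per-column-family flush lower bound seen in the
    // log message above; the 16 MB value here is illustrative, not from the test.
    TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(name)
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", "16777216");
    // Matches the schema echoed in the region-open journal: one version,
    // ROW bloom filters, no compression or block encoding (the defaults).
    for (String family : new String[] {"a", "b", "c"}) {
      td.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)
          .setBloomFilterType(BloomType.ROW)
          .build());
    }
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.createTable(td.build());
    }
  }
}
```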
2024-11-11T16:26:25,622 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:25,623 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 06e89e2a14be38bd767bb1d1f132acc8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73888151, jitterRate=0.1010192483663559}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:25,624 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 06e89e2a14be38bd767bb1d1f132acc8: Writing region info on filesystem at 1731342385613Initializing all the Stores at 1731342385613Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342385613Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342385614 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342385614Cleaning up temporary data from old regions at 1731342385620 (+6 ms)Region opened successfully at 1731342385624 (+4 ms) 2024-11-11T16:26:25,627 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile0 is 32, key is 000/a:a/1731342385627/Put/seqid=0 2024-11-11T16:26:25,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741923_1101 (size=4875) 2024-11-11T16:26:25,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741923_1101 (size=4875) 2024-11-11T16:26:25,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741923_1101 (size=4875) 2024-11-11T16:26:25,644 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile1 is 32, key is 100/a:a/1731342385644/Put/seqid=0 2024-11-11T16:26:25,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741924_1102 (size=4875) 2024-11-11T16:26:25,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741924_1102 (size=4875) 2024-11-11T16:26:25,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741924_1102 (size=4875) 2024-11-11T16:26:25,661 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile2 is 32, key is 200/a:a/1731342385661/Put/seqid=0 2024-11-11T16:26:25,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741925_1103 (size=4875) 2024-11-11T16:26:25,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741925_1103 (size=4875) 2024-11-11T16:26:25,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741925_1103 (size=4875) 2024-11-11T16:26:25,673 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile0 for inclusion in 06e89e2a14be38bd767bb1d1f132acc8/a 2024-11-11T16:26:25,677 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=000 last=050 2024-11-11T16:26:25,677 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-11T16:26:25,678 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile1 for inclusion in 06e89e2a14be38bd767bb1d1f132acc8/a 2024-11-11T16:26:25,682 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=100 last=150 2024-11-11T16:26:25,682 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-11T16:26:25,682 INFO [Time-limited test {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile2 for inclusion in 06e89e2a14be38bd767bb1d1f132acc8/a 2024-11-11T16:26:25,686 DEBUG [Time-limited test {}] regionserver.HStore(626): HFile bounds: first=200 last=250 2024-11-11T16:26:25,686 DEBUG [Time-limited test {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-11T16:26:25,686 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 06e89e2a14be38bd767bb1d1f132acc8 3/3 column families, dataSize=51 B heapSize=896 B 2024-11-11T16:26:25,702 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/.tmp/a/a03442b702304f91bcea026d37ffead0 is 55, key is testCompactedBulkLoadedFiles/a:a/1731342385624/Put/seqid=0 2024-11-11T16:26:25,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741926_1104 (size=5107) 2024-11-11T16:26:25,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741926_1104 (size=5107) 2024-11-11T16:26:25,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741926_1104 (size=5107) 2024-11-11T16:26:25,710 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51 B at sequenceid=4 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/.tmp/a/a03442b702304f91bcea026d37ffead0 2024-11-11T16:26:25,716 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/.tmp/a/a03442b702304f91bcea026d37ffead0 as 
hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/a03442b702304f91bcea026d37ffead0 2024-11-11T16:26:25,721 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/a03442b702304f91bcea026d37ffead0, entries=1, sequenceid=4, filesize=5.0 K 2024-11-11T16:26:25,722 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~51 B/51, heapSize ~368 B/368, currentSize=0 B/0 for 06e89e2a14be38bd767bb1d1f132acc8 in 36ms, sequenceid=4, compaction requested=false 2024-11-11T16:26:25,722 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 06e89e2a14be38bd767bb1d1f132acc8: 2024-11-11T16:26:25,723 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile0 as hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_ 2024-11-11T16:26:25,724 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile1 as hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/3d5843076ad7416781f1e792d363faa0_SeqId_4_ 2024-11-11T16:26:25,725 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile2 as hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/84e7eacc5707476b9046bdae3117659a_SeqId_4_ 2024-11-11T16:26:25,726 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile0 into 06e89e2a14be38bd767bb1d1f132acc8/a as hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_ - updating store file list. 2024-11-11T16:26:25,730 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-11T16:26:25,730 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_ into 06e89e2a14be38bd767bb1d1f132acc8/a 2024-11-11T16:26:25,730 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile0 into 06e89e2a14be38bd767bb1d1f132acc8/a (new location: hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_) 2024-11-11T16:26:25,732 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile1 into 06e89e2a14be38bd767bb1d1f132acc8/a as hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/3d5843076ad7416781f1e792d363faa0_SeqId_4_ - updating store file list. 
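[Editor's note] The entries above validate three pre-built HFiles (hfile0, hfile1, hfile2) against the region bounds and load them into store a, where each is renamed to a `*_SeqId_4_` store file. From a client, the usual way to get the same effect is the bulk-load tool; the sketch below assumes the BulkLoadHFiles API of recent HBase 2.x releases and a staging directory laid out with one sub-directory per column family, neither of which is shown in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed staging layout (not a path from this log):
    //   /staging/a/hfile0, /staging/a/hfile1, /staging/a/hfile2
    // i.e. one sub-directory per column family, each holding the HFiles to load.
    Path staging = new Path("hdfs://localhost:39605/staging");
    // Validates and atomically moves the HFiles into the target table's stores,
    // the client-side counterpart of the HStore(614)/(700) messages above.
    BulkLoadHFiles.create(conf)
        .bulkLoad(TableName.valueOf("testCompactedBulkLoadedFiles"), staging);
  }
}
```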
2024-11-11T16:26:25,736 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 3d5843076ad7416781f1e792d363faa0_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-11T16:26:25,736 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/3d5843076ad7416781f1e792d363faa0_SeqId_4_ into 06e89e2a14be38bd767bb1d1f132acc8/a 2024-11-11T16:26:25,736 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile1 into 06e89e2a14be38bd767bb1d1f132acc8/a (new location: hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/3d5843076ad7416781f1e792d363faa0_SeqId_4_) 2024-11-11T16:26:25,737 INFO [Time-limited test {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile2 into 06e89e2a14be38bd767bb1d1f132acc8/a as hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/84e7eacc5707476b9046bdae3117659a_SeqId_4_ - updating store file list. 2024-11-11T16:26:25,741 DEBUG [Time-limited test {}] regionserver.HStoreFile(483): HFile Bloom filter type for 84e7eacc5707476b9046bdae3117659a_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-11T16:26:25,741 INFO [Time-limited test {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/84e7eacc5707476b9046bdae3117659a_SeqId_4_ into 06e89e2a14be38bd767bb1d1f132acc8/a 2024-11-11T16:26:25,741 INFO [Time-limited test {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:39605/hbase/testCompactedBulkLoadedFiles/hfile2 into 06e89e2a14be38bd767bb1d1f132acc8/a (new location: hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/84e7eacc5707476b9046bdae3117659a_SeqId_4_) 2024-11-11T16:26:25,748 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T16:26:25,748 DEBUG [Time-limited test {}] regionserver.HStore(1541): 06e89e2a14be38bd767bb1d1f132acc8/a is initiating major compaction (all files) 2024-11-11T16:26:25,748 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 06e89e2a14be38bd767bb1d1f132acc8/a in testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8. 
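Editor's note: at this point all three staged HFiles have been validated and adopted into store 06e89e2a14be38bd767bb1d1f132acc8/a under _SeqId_4_ names. The test drives the bulk load on the HRegion directly; from a client the same effect is normally achieved with the bulk-load tool. The sketch below assumes the HBase 2.x BulkLoadHFiles API and a staging directory laid out as one subdirectory per column family (the staging path is made up; the flat directory in the log is test-specific).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class ClientBulkLoad {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Assumed staging layout expected by the tool: <staging>/<family>/<hfile>.
    Path staging = new Path("hdfs://localhost:39605/staging/testCompactedBulkLoadedFiles");

    // Validates each HFile against the target regions and moves it into place,
    // much like the HStore(614)/HStore(700) sequence in the log above.
    BulkLoadHFiles.create(conf)
        .bulkLoad(TableName.valueOf("testCompactedBulkLoadedFiles"), staging);
  }
}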
2024-11-11T16:26:25,749 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/a03442b702304f91bcea026d37ffead0, hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_, hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/3d5843076ad7416781f1e792d363faa0_SeqId_4_, hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/84e7eacc5707476b9046bdae3117659a_SeqId_4_] into tmpdir=hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/.tmp, totalSize=19.3 K 2024-11-11T16:26:25,749 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting a03442b702304f91bcea026d37ffead0, keycount=1, bloomtype=ROW, size=5.0 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=1731342385624 2024-11-11T16:26:25,750 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-11T16:26:25,750 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3d5843076ad7416781f1e792d363faa0_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-11T16:26:25,750 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 84e7eacc5707476b9046bdae3117659a_SeqId_4_, keycount=10, bloomtype=NONE, size=4.8 K, encoding=NONE, compression=NONE, seqNum=4, earliestPutTs=-9223372036854775808 2024-11-11T16:26:25,762 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/.tmp/a/1cd80d5f09174af4894e02cd09dcee54 is 55, key is testCompactedBulkLoadedFiles/a:a/1731342385624/Put/seqid=0 2024-11-11T16:26:25,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741927_1105 (size=6154) 2024-11-11T16:26:25,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741927_1105 (size=6154) 2024-11-11T16:26:25,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741927_1105 (size=6154) 2024-11-11T16:26:25,773 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/.tmp/a/1cd80d5f09174af4894e02cd09dcee54 as hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/1cd80d5f09174af4894e02cd09dcee54 2024-11-11T16:26:25,779 INFO [Time-limited test {}] regionserver.HStore(1337): Completed major compaction of 4 (all) file(s) in 06e89e2a14be38bd767bb1d1f132acc8/a of 06e89e2a14be38bd767bb1d1f132acc8 into 1cd80d5f09174af4894e02cd09dcee54(size=6.0 K), total size for store is 6.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
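Editor's note: the major compaction just logged rewrote the flush file plus the three bulk-loaded files into the single store file 1cd80d5f09174af4894e02cd09dcee54. The test requests this on the region object itself; a client would normally ask the cluster to do it through the Admin API, as in this small sketch (connection settings are assumed to come from an hbase-site.xml on the classpath).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestMajorCompaction {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the serving region server to rewrite every store file of the table
      // (including bulk-loaded ones) into one file per store; the request is
      // handled asynchronously on the server side.
      admin.majorCompact(TableName.valueOf("testCompactedBulkLoadedFiles"));
    }
  }
}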
2024-11-11T16:26:25,779 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 06e89e2a14be38bd767bb1d1f132acc8: 2024-11-11T16:26:25,779 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-11-11T16:26:25,779 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 0 store files, 0 compacting, 0 eligible, 16 blocking 2024-11-11T16:26:25,812 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595, size=0 (0bytes) 2024-11-11T16:26:25,812 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595 might be still open, length is 0 2024-11-11T16:26:25,812 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595 2024-11-11T16:26:25,813 WARN [IPC Server handler 4 on default port 39605 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595 has not been closed. Lease recovery is in progress. RecoveryId = 1106 for block blk_1073741922_1100 2024-11-11T16:26:25,813 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595 after 1ms 2024-11-11T16:26:26,587 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:55178 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:41813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55178 dst: /127.0.0.1:41813 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:41813 remote=/127.0.0.1:55178]. Total timeout mills is 60000, 59192 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:26,588 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:59988 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:32929:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59988 dst: /127.0.0.1:32929 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:26,588 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:45558 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741922_1100] {}] datanode.DataXceiver(331): 127.0.0.1:40903:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45558 dst: /127.0.0.1:40903 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:26,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741922_1106 (size=1168) 2024-11-11T16:26:26,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741922_1106 (size=1168) 2024-11-11T16:26:26,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741922_1106 (size=1168) 2024-11-11T16:26:29,398 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError 2024-11-11T16:26:29,398 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testDatalossWhenInputError Metrics about Tables on a single HBase RegionServer 2024-11-11T16:26:29,399 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles 2024-11-11T16:26:29,399 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testCompactedBulkLoadedFiles Metrics about Tables on a single HBase RegionServer 2024-11-11T16:26:29,814 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595 after 4002ms 2024-11-11T16:26:29,817 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:29,817 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595 took 4005ms 2024-11-11T16:26:29,819 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595; continuing. 
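Editor's note: the WARN/ERROR burst above is the expected side effect of splitting a WAL whose writer was never closed: the NameNode reports the file as still open, the splitter asks for lease recovery, the datanodes abort the in-flight write pipeline, and roughly four seconds later attempt 1 succeeds and the reader can open the file. A stripped-down version of the retry loop around DistributedFileSystem.recoverLease() is sketched below; the retry cap is arbitrary here, and RecoverLeaseFSUtils adds the timeouts, backoff and logging seen in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverWalLease {

  // Poll recoverLease() until the NameNode has finalized the last block.
  static void recover(DistributedFileSystem dfs, Path wal) throws Exception {
    boolean done = false;
    for (int attempt = 0; !done && attempt < 5; attempt++) {
      done = dfs.recoverLease(wal);   // true once the file is closed and readable
      if (!done) {
        Thread.sleep(4000L);          // the log shows ~4s between attempt 0 and 1
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Path wal = new Path(args[0]);     // e.g. the wal.1731342385595 path from the log
    FileSystem fs = FileSystem.get(wal.toUri(), new Configuration());
    recover((DistributedFileSystem) fs, wal);
  }
}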
2024-11-11T16:26:29,819 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595 so closing down 2024-11-11T16:26:29,819 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:29,820 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731342385595.temp 2024-11-11T16:26:29,821 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000003-wal.1731342385595.temp 2024-11-11T16:26:29,822 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:29,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741928_1107 (size=548) 2024-11-11T16:26:29,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741928_1107 (size=548) 2024-11-11T16:26:29,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741928_1107 (size=548) 2024-11-11T16:26:29,828 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000003-wal.1731342385595.temp (wrote 2 edits, skipped 0 edits in 0 ms) 2024-11-11T16:26:29,830 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000003-wal.1731342385595.temp to hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000008 2024-11-11T16:26:29,830 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 5 edits across 1 Regions in 13 ms; skipped=3; WAL=hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595, size=0, length=0, corrupted=false, cancelled=false 2024-11-11T16:26:29,830 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595, journal: Splitting hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595, size=0 (0bytes) at 1731342385812Finishing writing output for hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595 so closing down at 1731342389819 (+4007 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000003-wal.1731342385595.temp at 1731342389821 (+2 ms)3 split writer threads finished at 1731342389822 (+1 ms)Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000003-wal.1731342385595.temp (wrote 2 edits, 
skipped 0 edits in 0 ms) at 1731342389828 (+6 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000003-wal.1731342385595.temp to hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000008 at 1731342389830 (+2 ms)Processed 5 edits across 1 Regions in 13 ms; skipped=3; WAL=hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595, size=0, length=0, corrupted=false, cancelled=false at 1731342389830 2024-11-11T16:26:29,831 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595 to hdfs://localhost:39605/hbase/oldWALs/wal.1731342385595 2024-11-11T16:26:29,832 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000008 2024-11-11T16:26:29,832 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:29,834 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:29,848 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342389834, exclude list is [], retry=0 2024-11-11T16:26:29,850 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:29,850 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:29,850 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:29,852 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342389834 2024-11-11T16:26:29,852 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:40387:40387)] 2024-11-11T16:26:29,852 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 06e89e2a14be38bd767bb1d1f132acc8, NAME => 'testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:29,853 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8.; StoreHotnessProtector, 
parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:29,853 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:29,853 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:29,854 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:29,855 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 06e89e2a14be38bd767bb1d1f132acc8 columnFamilyName a 2024-11-11T16:26:29,855 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:29,861 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/1cd80d5f09174af4894e02cd09dcee54 2024-11-11T16:26:29,864 DEBUG [StoreFileOpener-06e89e2a14be38bd767bb1d1f132acc8-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-11T16:26:29,864 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_ 2024-11-11T16:26:29,867 DEBUG [StoreFileOpener-06e89e2a14be38bd767bb1d1f132acc8-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 3d5843076ad7416781f1e792d363faa0_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-11T16:26:29,867 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/3d5843076ad7416781f1e792d363faa0_SeqId_4_ 2024-11-11T16:26:29,870 DEBUG [StoreFileOpener-06e89e2a14be38bd767bb1d1f132acc8-a-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 84e7eacc5707476b9046bdae3117659a_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-11T16:26:29,870 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/84e7eacc5707476b9046bdae3117659a_SeqId_4_ 2024-11-11T16:26:29,874 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/a03442b702304f91bcea026d37ffead0 2024-11-11T16:26:29,874 WARN [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@afa4f2d 2024-11-11T16:26:29,874 WARN [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/3d5843076ad7416781f1e792d363faa0_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@afa4f2d 2024-11-11T16:26:29,874 WARN [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/84e7eacc5707476b9046bdae3117659a_SeqId_4_ from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@afa4f2d 2024-11-11T16:26:29,874 WARN [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.StoreEngine(317): Clearing the compacted storefile hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/a03442b702304f91bcea026d37ffead0 from org.apache.hadoop.hbase.regionserver.DefaultStoreEngine@afa4f2d 2024-11-11T16:26:29,874 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.StoreEngine(327): Moving the files [hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_, hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/3d5843076ad7416781f1e792d363faa0_SeqId_4_, hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/84e7eacc5707476b9046bdae3117659a_SeqId_4_, hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/a03442b702304f91bcea026d37ffead0] to archive 2024-11-11T16:26:29,875 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
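Editor's note: the WARN entries above show the store opener clearing the four pre-compaction files (the flush file and the three bulk-loaded ones) at open time: they are dropped from the live store and handed to HFileArchiver, which relocates them under /hbase/archive while keeping the same table/region/family layout. A rough sketch of such a move, with placeholder paths and the plain FileSystem API rather than the archiver itself:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveObsoleteStoreFile {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());

    // Placeholder paths; the archive tree mirrors the data tree under /hbase/archive.
    Path storeFile = new Path("/hbase/data/default/t/region1/a/obsoletefile");
    Path archived  = new Path("/hbase/archive/data/default/t/region1/a/obsoletefile");

    fs.mkdirs(archived.getParent());        // create the mirrored directory first
    if (!fs.rename(storeFile, archived)) {  // the real archiver also handles name clashes
      throw new IOException("failed to archive " + storeFile);
    }
  }
}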
2024-11-11T16:26:29,877 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_ to hdfs://localhost:39605/hbase/archive/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_ 2024-11-11T16:26:29,878 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/3d5843076ad7416781f1e792d363faa0_SeqId_4_ to hdfs://localhost:39605/hbase/archive/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/3d5843076ad7416781f1e792d363faa0_SeqId_4_ 2024-11-11T16:26:29,879 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/84e7eacc5707476b9046bdae3117659a_SeqId_4_ to hdfs://localhost:39605/hbase/archive/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/84e7eacc5707476b9046bdae3117659a_SeqId_4_ 2024-11-11T16:26:29,880 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/a03442b702304f91bcea026d37ffead0 to hdfs://localhost:39605/hbase/archive/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/a/a03442b702304f91bcea026d37ffead0 2024-11-11T16:26:29,880 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(327): Store=06e89e2a14be38bd767bb1d1f132acc8/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:29,880 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:29,881 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 06e89e2a14be38bd767bb1d1f132acc8 columnFamilyName b 2024-11-11T16:26:29,881 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:29,882 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] 
regionserver.HStore(327): Store=06e89e2a14be38bd767bb1d1f132acc8/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:29,882 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:29,882 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 06e89e2a14be38bd767bb1d1f132acc8 columnFamilyName c 2024-11-11T16:26:29,882 DEBUG [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:29,883 INFO [StoreOpener-06e89e2a14be38bd767bb1d1f132acc8-1 {}] regionserver.HStore(327): Store=06e89e2a14be38bd767bb1d1f132acc8/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:29,883 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:29,884 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:29,885 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:29,886 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000008 2024-11-11T16:26:29,887 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000008: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:29,890 DEBUG [Time-limited test {}] regionserver.HRegion(5836): 06e89e2a14be38bd767bb1d1f132acc8 : Replaying compaction marker table_name: "testCompactedBulkLoadedFiles" encoded_region_name: "06e89e2a14be38bd767bb1d1f132acc8" family_name: "a" compaction_input: "a03442b702304f91bcea026d37ffead0" compaction_input: "2d0c6e390e7642dea0ab6f4bec1daba0_SeqId_4_" compaction_input: "3d5843076ad7416781f1e792d363faa0_SeqId_4_" compaction_input: 
"84e7eacc5707476b9046bdae3117659a_SeqId_4_" compaction_output: "1cd80d5f09174af4894e02cd09dcee54" store_home_dir: "a" region_name: "testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8." with seqId=9223372036854775807 and lastReplayedOpenRegionSeqId=-1 2024-11-11T16:26:29,890 DEBUG [Time-limited test {}] regionserver.HStore(1354): Completing compaction from the WAL marker 2024-11-11T16:26:29,890 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 0, skipped 2, firstSequenceIdInLog=3, maxSequenceIdInLog=8, path=hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000008 2024-11-11T16:26:29,891 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/0000000000000000008 2024-11-11T16:26:29,892 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:29,892 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:29,892 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testCompactedBulkLoadedFiles descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:29,893 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 06e89e2a14be38bd767bb1d1f132acc8 2024-11-11T16:26:29,895 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testCompactedBulkLoadedFiles/06e89e2a14be38bd767bb1d1f132acc8/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-11T16:26:29,896 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 06e89e2a14be38bd767bb1d1f132acc8; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65710179, jitterRate=-0.02084203064441681}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:29,896 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 06e89e2a14be38bd767bb1d1f132acc8: Writing region info on filesystem at 1731342389853Initializing all the Stores at 1731342389854 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342389854Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342389854Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => 
'65536 B (64KB)'} at 1731342389854Cleaning up temporary data from old regions at 1731342389892 (+38 ms)Region opened successfully at 1731342389896 (+4 ms) 2024-11-11T16:26:29,899 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 06e89e2a14be38bd767bb1d1f132acc8, disabling compactions & flushes 2024-11-11T16:26:29,899 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8. 2024-11-11T16:26:29,899 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8. 2024-11-11T16:26:29,899 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8. after waiting 0 ms 2024-11-11T16:26:29,899 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8. 2024-11-11T16:26:29,900 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testCompactedBulkLoadedFiles,,1731342385541.06e89e2a14be38bd767bb1d1f132acc8. 2024-11-11T16:26:29,900 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 06e89e2a14be38bd767bb1d1f132acc8: Waiting for close lock at 1731342389899Disabling compacts and flushes for region at 1731342389899Disabling writes for close at 1731342389899Writing region close event to WAL at 1731342389900 (+1 ms)Closed at 1731342389900 2024-11-11T16:26:29,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741929_1108 (size=95) 2024-11-11T16:26:29,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741929_1108 (size=95) 2024-11-11T16:26:29,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741929_1108 (size=95) 2024-11-11T16:26:29,905 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:29,905 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731342389834) 2024-11-11T16:26:29,919 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testCompactedBulkLoadedFiles Thread=434 (was 425) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2098531455_22 at /127.0.0.1:48774 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2098531455_22 at /127.0.0.1:41934 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:39605 from jenkinstestCompactedBulkLoadedFiles java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-26-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestCompactedBulkLoadedFiles@localhost:39605 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2098531455_22 at 
/127.0.0.1:35864 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-26-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1330 (was 1250) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=654 (was 654), ProcessCount=11 (was 11), AvailableMemoryMB=2059 (was 2083) 2024-11-11T16:26:29,919 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1330 is superior to 1024 2024-11-11T16:26:29,931 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=434, OpenFileDescriptor=1330, MaxFileDescriptor=1048576, SystemLoadAverage=654, ProcessCount=11, AvailableMemoryMB=2059 2024-11-11T16:26:29,931 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1330 is superior to 1024 2024-11-11T16:26:29,947 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:29,974 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:26:29,975 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:26:29,978 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=hregion-33663032, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/hregion-33663032, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:29,992 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/hregion-33663032/hregion-33663032.1731342389978, exclude list is [], retry=0 2024-11-11T16:26:29,995 DEBUG [AsyncFSWAL-28-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:29,995 DEBUG [AsyncFSWAL-28-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:29,995 DEBUG [AsyncFSWAL-28-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:29,997 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/hregion-33663032/hregion-33663032.1731342389978 2024-11-11T16:26:29,998 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:26:29,998 INFO [Time-limited test {}] regionserver.HRegion(7572): creating {ENCODED => 72312c8c240f28613946b9d26d522498, NAME => 'testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testReplayEditsWrittenViaHRegion', {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39605/hbase 2024-11-11T16:26:30,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741931_1110 (size=67) 2024-11-11T16:26:30,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741931_1110 (size=67) 2024-11-11T16:26:30,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741931_1110 (size=67) 2024-11-11T16:26:30,007 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:30,008 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,009 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName a 2024-11-11T16:26:30,010 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:30,010 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:30,010 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,011 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName b 2024-11-11T16:26:30,011 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:30,012 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:30,012 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,013 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName c 2024-11-11T16:26:30,013 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:30,013 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:30,014 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,014 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,015 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,016 DEBUG 
[Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,016 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,017 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:30,018 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,020 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:26:30,021 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 72312c8c240f28613946b9d26d522498; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65059199, jitterRate=-0.030542388558387756}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:30,021 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 72312c8c240f28613946b9d26d522498: Writing region info on filesystem at 1731342390007Initializing all the Stores at 1731342390008 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342390008Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342390008Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342390008Cleaning up temporary data from old regions at 1731342390016 (+8 ms)Region opened successfully at 1731342390021 (+5 ms) 2024-11-11T16:26:30,021 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 72312c8c240f28613946b9d26d522498, disabling compactions & flushes 2024-11-11T16:26:30,021 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 2024-11-11T16:26:30,021 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 2024-11-11T16:26:30,021 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 
after waiting 0 ms 2024-11-11T16:26:30,021 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 2024-11-11T16:26:30,022 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 2024-11-11T16:26:30,022 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 72312c8c240f28613946b9d26d522498: Waiting for close lock at 1731342390021Disabling compacts and flushes for region at 1731342390021Disabling writes for close at 1731342390021Writing region close event to WAL at 1731342390022 (+1 ms)Closed at 1731342390022 2024-11-11T16:26:30,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741930_1109 (size=95) 2024-11-11T16:26:30,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741930_1109 (size=95) 2024-11-11T16:26:30,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741930_1109 (size=95) 2024-11-11T16:26:30,026 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:30,026 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL hregion-33663032:(num 1731342389978) 2024-11-11T16:26:30,027 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:30,028 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:30,043 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029, exclude list is [], retry=0 2024-11-11T16:26:30,045 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:30,046 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:30,046 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:30,048 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029 2024-11-11T16:26:30,048 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969)] 2024-11-11T16:26:30,049 DEBUG [Time-limited test {}] 
regionserver.HRegion(7752): Opening region: {ENCODED => 72312c8c240f28613946b9d26d522498, NAME => 'testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:30,049 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:30,049 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,049 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,052 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,053 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName a 2024-11-11T16:26:30,053 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:30,054 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:30,054 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,054 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName b 2024-11-11T16:26:30,055 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:30,055 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:30,055 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,056 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName c 2024-11-11T16:26:30,056 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:30,056 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:30,056 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,057 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,058 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,058 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,058 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,059 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
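The FlushLargeStoresPolicy entry just above falls back to the region memstore flush size divided by the number of column families because no hbase.hregion.percolumnfamilyflush.size.lower.bound is set. A minimal sketch of that arithmetic (illustrative Java, not HBase code; the 128 MB flush size is inferred from the logged flushSizeLowerBound=44739242):

public class FlushLowerBoundSketch {
    public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // assumed: 128 MB flush size implied by the logged lower bound
        int columnFamilies = 3;                      // families a, b and c in this test table
        long lowerBound = memstoreFlushSize / columnFamilies;
        // prints flushSizeLowerBound=44739242 (~42.7 MB), matching the log entries above
        System.out.printf("flushSizeLowerBound=%d (~%.1f MB)%n",
                lowerBound, lowerBound / (1024.0 * 1024.0));
    }
}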
2024-11-11T16:26:30,060 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,061 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 72312c8c240f28613946b9d26d522498; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66419807, jitterRate=-0.010267749428749084}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:30,061 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 72312c8c240f28613946b9d26d522498: Writing region info on filesystem at 1731342390049Initializing all the Stores at 1731342390050 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342390050Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342390052 (+2 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342390052Cleaning up temporary data from old regions at 1731342390059 (+7 ms)Region opened successfully at 1731342390061 (+2 ms) 2024-11-11T16:26:30,069 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 72312c8c240f28613946b9d26d522498 3/3 column families, dataSize=870 B heapSize=2.31 KB 2024-11-11T16:26:30,086 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/a/054c2078bc58484c911ad51f1433c7f9 is 91, key is testReplayEditsWrittenViaHRegion/a:x0/1731342390061/Put/seqid=0 2024-11-11T16:26:30,091 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T16:26:30,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741933_1112 (size=5958) 2024-11-11T16:26:30,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741933_1112 (size=5958) 2024-11-11T16:26:30,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741933_1112 (size=5958) 2024-11-11T16:26:30,093 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/a/054c2078bc58484c911ad51f1433c7f9 2024-11-11T16:26:30,100 DEBUG [Time-limited test {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/a/054c2078bc58484c911ad51f1433c7f9 as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/a/054c2078bc58484c911ad51f1433c7f9 2024-11-11T16:26:30,106 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/a/054c2078bc58484c911ad51f1433c7f9, entries=10, sequenceid=13, filesize=5.8 K 2024-11-11T16:26:30,107 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~870 B/870, heapSize ~1.80 KB/1840, currentSize=0 B/0 for 72312c8c240f28613946b9d26d522498 in 38ms, sequenceid=13, compaction requested=false 2024-11-11T16:26:30,108 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 72312c8c240f28613946b9d26d522498: 2024-11-11T16:26:30,126 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 72312c8c240f28613946b9d26d522498, disabling compactions & flushes 2024-11-11T16:26:30,126 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 2024-11-11T16:26:30,126 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 2024-11-11T16:26:30,126 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. after waiting 0 ms 2024-11-11T16:26:30,126 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 2024-11-11T16:26:30,127 ERROR [Time-limited test {}] regionserver.HRegion(1960): Memstore data size is 1740 in region testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 2024-11-11T16:26:30,127 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 
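The flush and close entries above account for the byte counts as follows: only family 'a' held data at flush time (10 cells, dataSize=870 B, sequenceid=13), and the 10 cells later written to each of 'b' and 'c' but never flushed make up the 1740 B reported when the region is closed; those are the edits the subsequent WAL split and replay recover. A small illustrative sketch of the arithmetic (editor's example, not HBase code):

public class MemstoreSizeSketch {
    public static void main(String[] args) {
        int cellsPerFamily = 10;        // entries=10 per family in this test
        long bytesPerCell = 870L / 10;  // 87 B per cell, from the flushed dataSize of family 'a'
        long flushedA = cellsPerFamily * bytesPerCell;         // 870 B flushed at sequenceid=13
        long unflushedBC = 2L * cellsPerFamily * bytesPerCell; // 1740 B left in 'b' and 'c' at close
        System.out.println("flushed a=" + flushedA + " B, unflushed b+c=" + unflushedBC + " B");
    }
}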
2024-11-11T16:26:30,127 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 72312c8c240f28613946b9d26d522498: Waiting for close lock at 1731342390126Disabling compacts and flushes for region at 1731342390126Disabling writes for close at 1731342390126Writing region close event to WAL at 1731342390127 (+1 ms)Closed at 1731342390127 2024-11-11T16:26:30,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741932_1111 (size=3346) 2024-11-11T16:26:30,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741932_1111 (size=3346) 2024-11-11T16:26:30,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741932_1111 (size=3346) 2024-11-11T16:26:30,147 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029, size=3.3 K (3346bytes) 2024-11-11T16:26:30,147 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029 2024-11-11T16:26:30,148 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029 after 1ms 2024-11-11T16:26:30,150 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:30,150 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029 took 3ms 2024-11-11T16:26:30,152 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029 so closing down 2024-11-11T16:26:30,152 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:30,154 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000003-wal.1731342390029.temp 2024-11-11T16:26:30,155 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000003-wal.1731342390029.temp 2024-11-11T16:26:30,155 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:30,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741934_1113 (size=2944) 2024-11-11T16:26:30,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741934_1113 (size=2944) 2024-11-11T16:26:30,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741934_1113 
(size=2944) 2024-11-11T16:26:30,163 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000003-wal.1731342390029.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-11-11T16:26:30,164 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000003-wal.1731342390029.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000035 2024-11-11T16:26:30,164 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 32 edits across 1 Regions in 13 ms; skipped=2; WAL=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029, size=3.3 K, length=3346, corrupted=false, cancelled=false 2024-11-11T16:26:30,164 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029, journal: Splitting hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029, size=3.3 K (3346bytes) at 1731342390147Finishing writing output for hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029 so closing down at 1731342390152 (+5 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000003-wal.1731342390029.temp at 1731342390155 (+3 ms)3 split writer threads finished at 1731342390155Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000003-wal.1731342390029.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1731342390163 (+8 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000003-wal.1731342390029.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000035 at 1731342390164 (+1 ms)Processed 32 edits across 1 Regions in 13 ms; skipped=2; WAL=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029, size=3.3 K, length=3346, corrupted=false, cancelled=false at 1731342390164 2024-11-11T16:26:30,166 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390029 to hdfs://localhost:39605/hbase/oldWALs/wal.1731342390029 2024-11-11T16:26:30,167 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000035 2024-11-11T16:26:30,167 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:30,168 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, 
rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:30,189 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169, exclude list is [], retry=0 2024-11-11T16:26:30,192 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:30,193 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:30,193 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:30,194 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169 2024-11-11T16:26:30,195 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33071:33071),(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969)] 2024-11-11T16:26:30,195 DEBUG [Time-limited test {}] regionserver.HRegion(7752): Opening region: {ENCODED => 72312c8c240f28613946b9d26d522498, NAME => 'testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:26:30,195 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:30,195 DEBUG [Time-limited test {}] regionserver.HRegion(7794): checking encryption for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,195 DEBUG [Time-limited test {}] regionserver.HRegion(7797): checking classloading for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,196 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,197 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName a 2024-11-11T16:26:30,197 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:30,204 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/a/054c2078bc58484c911ad51f1433c7f9 2024-11-11T16:26:30,204 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:30,204 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,205 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName b 2024-11-11T16:26:30,205 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:30,205 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:30,205 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,206 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName c 2024-11-11T16:26:30,206 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:30,206 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:30,206 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,207 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,208 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,209 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000035 2024-11-11T16:26:30,211 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000035: isRecoveredEdits=true, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:30,212 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 20, skipped 10, firstSequenceIdInLog=3, maxSequenceIdInLog=35, path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000035 2024-11-11T16:26:30,212 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 72312c8c240f28613946b9d26d522498 3/3 column families, dataSize=1.70 KB heapSize=3.88 KB 2024-11-11T16:26:30,234 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/b/9deb8be72ed7495f90adcfa724da725e is 91, key is testReplayEditsWrittenViaHRegion/b:x0/1731342390108/Put/seqid=0 2024-11-11T16:26:30,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741936_1115 (size=5958) 2024-11-11T16:26:30,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741936_1115 (size=5958) 2024-11-11T16:26:30,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741936_1115 (size=5958) 2024-11-11T16:26:30,243 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), 
to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/b/9deb8be72ed7495f90adcfa724da725e 2024-11-11T16:26:30,263 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/c/c713bec1404a4c44b84ccd2444b7f32e is 91, key is testReplayEditsWrittenViaHRegion/c:x0/1731342390117/Put/seqid=0 2024-11-11T16:26:30,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741937_1116 (size=5958) 2024-11-11T16:26:30,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741937_1116 (size=5958) 2024-11-11T16:26:30,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741937_1116 (size=5958) 2024-11-11T16:26:30,270 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/c/c713bec1404a4c44b84ccd2444b7f32e 2024-11-11T16:26:30,277 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/b/9deb8be72ed7495f90adcfa724da725e as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/b/9deb8be72ed7495f90adcfa724da725e 2024-11-11T16:26:30,282 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/b/9deb8be72ed7495f90adcfa724da725e, entries=10, sequenceid=35, filesize=5.8 K 2024-11-11T16:26:30,283 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/c/c713bec1404a4c44b84ccd2444b7f32e as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/c/c713bec1404a4c44b84ccd2444b7f32e 2024-11-11T16:26:30,289 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/c/c713bec1404a4c44b84ccd2444b7f32e, entries=10, sequenceid=35, filesize=5.8 K 2024-11-11T16:26:30,289 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 72312c8c240f28613946b9d26d522498 in 77ms, sequenceid=35, compaction requested=false; wal=null 2024-11-11T16:26:30,290 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000035 2024-11-11T16:26:30,291 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,291 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,291 DEBUG [Time-limited test {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:30,293 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:30,295 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/35.seqid, newMaxSeqId=35, maxSeqId=1 2024-11-11T16:26:30,296 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 72312c8c240f28613946b9d26d522498; next sequenceid=36; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66120273, jitterRate=-0.014731153845787048}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:30,296 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 72312c8c240f28613946b9d26d522498: Writing region info on filesystem at 1731342390195Initializing all the Stores at 1731342390196 (+1 ms)Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342390196Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342390196Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342390196Obtaining lock to block concurrent updates at 1731342390212 (+16 ms)Preparing flush snapshotting stores in 72312c8c240f28613946b9d26d522498 at 1731342390212Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498., syncing WAL and waiting on mvcc, flushsize=dataSize=1740, getHeapSize=3920, getOffHeapSize=0, getCellsCount=20 at 1731342390213 (+1 ms)Flushing stores of testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 
at 1731342390213Flushing 72312c8c240f28613946b9d26d522498/b: creating writer at 1731342390213Flushing 72312c8c240f28613946b9d26d522498/b: appending metadata at 1731342390233 (+20 ms)Flushing 72312c8c240f28613946b9d26d522498/b: closing flushed file at 1731342390233Flushing 72312c8c240f28613946b9d26d522498/c: creating writer at 1731342390249 (+16 ms)Flushing 72312c8c240f28613946b9d26d522498/c: appending metadata at 1731342390263 (+14 ms)Flushing 72312c8c240f28613946b9d26d522498/c: closing flushed file at 1731342390263Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52848657: reopening flushed file at 1731342390276 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43833c97: reopening flushed file at 1731342390282 (+6 ms)Finished flush of dataSize ~1.70 KB/1740, heapSize ~3.59 KB/3680, currentSize=0 B/0 for 72312c8c240f28613946b9d26d522498 in 77ms, sequenceid=35, compaction requested=false; wal=null at 1731342390289 (+7 ms)Cleaning up temporary data from old regions at 1731342390291 (+2 ms)Region opened successfully at 1731342390296 (+5 ms) 2024-11-11T16:26:30,365 INFO [Time-limited test {}] wal.WALSplitter(299): Splitting hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169, size=0 (0bytes) 2024-11-11T16:26:30,365 WARN [Time-limited test {}] wal.WALSplitter(453): File hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169 might be still open, length is 0 2024-11-11T16:26:30,365 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169 2024-11-11T16:26:30,366 WARN [IPC Server handler 2 on default port 39605 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169 has not been closed. Lease recovery is in progress. RecoveryId = 1117 for block blk_1073741935_1114 2024-11-11T16:26:30,366 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169 after 1ms 2024-11-11T16:26:32,587 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:41998 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:41813:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41998 dst: /127.0.0.1:41813 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:41813 remote=/127.0.0.1:41998]. Total timeout mills is 60000, 57739 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:32,588 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:48838 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:40903:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48838 dst: /127.0.0.1:40903 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:32,589 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1961639719_22 at /127.0.0.1:35932 [Receiving block BP-1916425677-172.17.0.2-1731342345074:blk_1073741935_1114] {}] datanode.DataXceiver(331): 127.0.0.1:32929:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35932 dst: /127.0.0.1:32929 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:32,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741935_1117 (size=2936) 2024-11-11T16:26:32,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741935_1117 (size=2936) 2024-11-11T16:26:33,186 INFO [master/16b413a53992:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T16:26:33,186 INFO [master/16b413a53992:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-11T16:26:34,367 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169 after 4002ms 2024-11-11T16:26:34,370 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169: isRecoveredEdits=false, hasTagCompression=true, hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:34,370 INFO [Time-limited test {}] wal.WALSplitter(310): Open hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169 took 4005ms 2024-11-11T16:26:34,372 INFO [Time-limited test {}] wal.WALSplitter(493): EOF from hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169; continuing. 
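The split journal in the entries that follow shows the recovered-edits naming convention used here: the temporary output file carries the first sequence id written to it, zero-padded to 19 digits and suffixed with the source WAL name, and it is renamed to the highest sequence id in the file once the writer closes. A minimal sketch of that naming (illustrative Java only; the helper name is not HBase's):

public class RecoveredEditsNameSketch {
    // zero-pad a sequence id to the 19-digit form seen in the log
    static String pad(long seqId) {
        return String.format("%019d", seqId);
    }

    public static void main(String[] args) {
        long firstSeqIdInLog = 37;  // first edit written by this recovered-edits writer
        long maxSeqIdInLog = 66;    // last edit, used for the final file name
        String walName = "wal.1731342390169";
        String tempName = pad(firstSeqIdInLog) + "-" + walName + ".temp";
        String finalName = pad(maxSeqIdInLog);
        // prints: 0000000000000000037-wal.1731342390169.temp -> 0000000000000000066
        System.out.println(tempName + " -> " + finalName);
    }
}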
2024-11-11T16:26:34,372 DEBUG [Time-limited test {}] wal.WALSplitter(406): Finishing writing output for hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169 so closing down 2024-11-11T16:26:34,372 DEBUG [Time-limited test {}] wal.OutputSink(125): Waiting for split writer threads to finish 2024-11-11T16:26:34,373 INFO [Time-limited test-Writer-0 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 0000000000000000037-wal.1731342390169.temp 2024-11-11T16:26:34,374 INFO [Time-limited test-Writer-0 {}] wal.AbstractRecoveredEditsOutputSink(71): Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000037-wal.1731342390169.temp 2024-11-11T16:26:34,375 INFO [Time-limited test {}] wal.OutputSink(145): 3 split writer threads finished 2024-11-11T16:26:34,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741938_1118 (size=2944) 2024-11-11T16:26:34,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741938_1118 (size=2944) 2024-11-11T16:26:34,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741938_1118 (size=2944) 2024-11-11T16:26:34,385 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(90): Closed recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000037-wal.1731342390169.temp (wrote 30 edits, skipped 0 edits in 0 ms) 2024-11-11T16:26:34,386 INFO [split-log-closeStream-pool-0 {}] wal.AbstractRecoveredEditsOutputSink(123): Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000037-wal.1731342390169.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000066 2024-11-11T16:26:34,386 INFO [Time-limited test {}] wal.WALSplitter(425): Processed 30 edits across 1 Regions in 16 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169, size=0, length=0, corrupted=false, cancelled=false 2024-11-11T16:26:34,386 DEBUG [Time-limited test {}] wal.WALSplitter(428): Completed split of hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169, journal: Splitting hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169, size=0 (0bytes) at 1731342390365Finishing writing output for hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169 so closing down at 1731342394372 (+4007 ms)Creating recovered edits writer path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000037-wal.1731342390169.temp at 1731342394374 (+2 ms)3 split writer threads finished at 1731342394375 (+1 ms)Closed recovered edits writer 
path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000037-wal.1731342390169.temp (wrote 30 edits, skipped 0 edits in 0 ms) at 1731342394385 (+10 ms)Rename recovered edits hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000037-wal.1731342390169.temp to hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000066 at 1731342394386 (+1 ms)Processed 30 edits across 1 Regions in 16 ms; skipped=0; WAL=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169, size=0, length=0, corrupted=false, cancelled=false at 1731342394386 2024-11-11T16:26:34,388 INFO [Time-limited test {}] wal.WALSplitUtil(143): Moved hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169 to hdfs://localhost:39605/hbase/oldWALs/wal.1731342390169 2024-11-11T16:26:34,388 INFO [Time-limited test {}] wal.AbstractTestWALReplay(1167): Split file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000066 2024-11-11T16:26:34,389 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor monitor 2024-11-11T16:26:34,390 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=wal, suffix=, logDir=hdfs://localhost:39605/hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946, archiveDir=hdfs://localhost:39605/hbase/oldWALs, maxLogs=32 2024-11-11T16:26:34,404 DEBUG [Time-limited test {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342394390, exclude list is [], retry=0 2024-11-11T16:26:34,406 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32929,DS-e86d92e3-e756-4efa-8415-33ee44fedfc2,DISK] 2024-11-11T16:26:34,406 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40903,DS-6aee253a-12c8-459c-998e-494c3b2755a0,DISK] 2024-11-11T16:26:34,406 DEBUG [TestAsyncWALReplay-pool-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = /127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41813,DS-11a8ce1d-a6ec-4582-95e1-dd214088af88,DISK] 2024-11-11T16:26:34,408 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342394390 2024-11-11T16:26:34,408 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40387:40387),(127.0.0.1/127.0.0.1:34969:34969),(127.0.0.1/127.0.0.1:33071:33071)] 2024-11-11T16:26:34,408 DEBUG [Time-limited test {}] regionserver.HRegion(898): Instantiated testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; 
preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:26:34,411 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family a of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:34,411 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName a 2024-11-11T16:26:34,412 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:34,417 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/a/054c2078bc58484c911ad51f1433c7f9 2024-11-11T16:26:34,417 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/a, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:34,417 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family b of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:34,418 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName b 2024-11-11T16:26:34,418 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:34,422 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/b/9deb8be72ed7495f90adcfa724da725e 2024-11-11T16:26:34,422 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/b, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:34,422 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family c of region 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:34,423 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 72312c8c240f28613946b9d26d522498 columnFamilyName c 2024-11-11T16:26:34,423 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:26:34,428 DEBUG [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/c/c713bec1404a4c44b84ccd2444b7f32e 2024-11-11T16:26:34,428 INFO [StoreOpener-72312c8c240f28613946b9d26d522498-1 {}] regionserver.HStore(327): Store=72312c8c240f28613946b9d26d522498/c, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:26:34,428 DEBUG [Time-limited test {}] regionserver.HRegion(1038): replaying wal for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:34,429 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498 2024-11-11T16:26:34,430 DEBUG [Time-limited test {}] regionserver.HRegion(5546): Found 1 recovered edits file(s) under hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498 2024-11-11T16:26:34,430 INFO [Time-limited test {}] regionserver.HRegion(5613): Replaying edits from hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000066 2024-11-11T16:26:34,432 DEBUG [Time-limited test {}] wal.AbstractProtobufWALReader(321): Initializing compression context for hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000066: isRecoveredEdits=true, hasTagCompression=true, 
hasValueCompression=true, valueCompressionType=GZ 2024-11-11T16:26:34,437 DEBUG [Time-limited test {}] regionserver.HRegion(5793): Applied 30, skipped 0, firstSequenceIdInLog=37, maxSequenceIdInLog=66, path=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000066 2024-11-11T16:26:34,437 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 72312c8c240f28613946b9d26d522498 3/3 column families, dataSize=2.55 KB heapSize=5.44 KB 2024-11-11T16:26:34,452 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/a/be07a2631bfb4506b5ecb2669138454f is 91, key is testReplayEditsWrittenViaHRegion/a:y0/1731342390304/Put/seqid=0 2024-11-11T16:26:34,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741940_1120 (size=5958) 2024-11-11T16:26:34,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741940_1120 (size=5958) 2024-11-11T16:26:34,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741940_1120 (size=5958) 2024-11-11T16:26:34,460 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/a/be07a2631bfb4506b5ecb2669138454f 2024-11-11T16:26:34,479 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/b/7ccc3b3885a944c59ac28933bfe1cb15 is 91, key is testReplayEditsWrittenViaHRegion/b:y0/1731342390312/Put/seqid=0 2024-11-11T16:26:34,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741941_1121 (size=5958) 2024-11-11T16:26:34,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741941_1121 (size=5958) 2024-11-11T16:26:34,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741941_1121 (size=5958) 2024-11-11T16:26:34,486 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/b/7ccc3b3885a944c59ac28933bfe1cb15 2024-11-11T16:26:34,504 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/c/8154315c6784484288b927ca518c9773 is 91, key is testReplayEditsWrittenViaHRegion/c:y0/1731342390319/Put/seqid=0 2024-11-11T16:26:34,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741942_1122 (size=5958) 2024-11-11T16:26:34,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741942_1122 
(size=5958) 2024-11-11T16:26:34,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741942_1122 (size=5958) 2024-11-11T16:26:34,511 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=870 B at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/c/8154315c6784484288b927ca518c9773 2024-11-11T16:26:34,516 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/a/be07a2631bfb4506b5ecb2669138454f as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/a/be07a2631bfb4506b5ecb2669138454f 2024-11-11T16:26:34,521 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/a/be07a2631bfb4506b5ecb2669138454f, entries=10, sequenceid=66, filesize=5.8 K 2024-11-11T16:26:34,522 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/b/7ccc3b3885a944c59ac28933bfe1cb15 as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/b/7ccc3b3885a944c59ac28933bfe1cb15 2024-11-11T16:26:34,526 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/b/7ccc3b3885a944c59ac28933bfe1cb15, entries=10, sequenceid=66, filesize=5.8 K 2024-11-11T16:26:34,526 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/.tmp/c/8154315c6784484288b927ca518c9773 as hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/c/8154315c6784484288b927ca518c9773 2024-11-11T16:26:34,530 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/c/8154315c6784484288b927ca518c9773, entries=10, sequenceid=66, filesize=5.8 K 2024-11-11T16:26:34,531 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 72312c8c240f28613946b9d26d522498 in 94ms, sequenceid=66, compaction requested=false; wal=null 2024-11-11T16:26:34,531 DEBUG [Time-limited test {}] regionserver.HRegion(5420): Deleted recovered.edits file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/0000000000000000066 2024-11-11T16:26:34,532 DEBUG [Time-limited test {}] regionserver.HRegion(1048): stopping wal replay for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:34,532 DEBUG [Time-limited test {}] regionserver.HRegion(1060): Cleaning up temporary data for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:34,533 DEBUG [Time-limited test {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table testReplayEditsWrittenViaHRegion 
descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T16:26:34,534 DEBUG [Time-limited test {}] regionserver.HRegion(1093): writing seq id for 72312c8c240f28613946b9d26d522498 2024-11-11T16:26:34,536 DEBUG [Time-limited test {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/hbase/data/default/testReplayEditsWrittenViaHRegion/72312c8c240f28613946b9d26d522498/recovered.edits/66.seqid, newMaxSeqId=66, maxSeqId=35 2024-11-11T16:26:34,537 INFO [Time-limited test {}] regionserver.HRegion(1114): Opened 72312c8c240f28613946b9d26d522498; next sequenceid=67; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65779483, jitterRate=-0.01980932056903839}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T16:26:34,537 DEBUG [Time-limited test {}] regionserver.HRegion(1006): Region open journal for 72312c8c240f28613946b9d26d522498: Writing region info on filesystem at 1731342394409Initializing all the Stores at 1731342394409Instantiating store for column family {NAME => 'a', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342394409Instantiating store for column family {NAME => 'b', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342394410 (+1 ms)Instantiating store for column family {NAME => 'c', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342394410Obtaining lock to block concurrent updates at 1731342394437 (+27 ms)Preparing flush snapshotting stores in 72312c8c240f28613946b9d26d522498 at 1731342394437Finished memstore snapshotting testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498., syncing WAL and waiting on mvcc, flushsize=dataSize=2610, getHeapSize=5520, getOffHeapSize=0, getCellsCount=30 at 1731342394437Flushing stores of testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 
at 1731342394437Flushing 72312c8c240f28613946b9d26d522498/a: creating writer at 1731342394437Flushing 72312c8c240f28613946b9d26d522498/a: appending metadata at 1731342394452 (+15 ms)Flushing 72312c8c240f28613946b9d26d522498/a: closing flushed file at 1731342394452Flushing 72312c8c240f28613946b9d26d522498/b: creating writer at 1731342394464 (+12 ms)Flushing 72312c8c240f28613946b9d26d522498/b: appending metadata at 1731342394478 (+14 ms)Flushing 72312c8c240f28613946b9d26d522498/b: closing flushed file at 1731342394478Flushing 72312c8c240f28613946b9d26d522498/c: creating writer at 1731342394490 (+12 ms)Flushing 72312c8c240f28613946b9d26d522498/c: appending metadata at 1731342394503 (+13 ms)Flushing 72312c8c240f28613946b9d26d522498/c: closing flushed file at 1731342394503Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5380a141: reopening flushed file at 1731342394516 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@540ba4ee: reopening flushed file at 1731342394521 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19857ad0: reopening flushed file at 1731342394526 (+5 ms)Finished flush of dataSize ~2.55 KB/2610, heapSize ~5.39 KB/5520, currentSize=0 B/0 for 72312c8c240f28613946b9d26d522498 in 94ms, sequenceid=66, compaction requested=false; wal=null at 1731342394531 (+5 ms)Cleaning up temporary data from old regions at 1731342394532 (+1 ms)Region opened successfully at 1731342394537 (+5 ms) 2024-11-11T16:26:34,550 DEBUG [Time-limited test {}] regionserver.HRegion(1722): Closing 72312c8c240f28613946b9d26d522498, disabling compactions & flushes 2024-11-11T16:26:34,550 INFO [Time-limited test {}] regionserver.HRegion(1755): Closing region testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 2024-11-11T16:26:34,550 DEBUG [Time-limited test {}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 2024-11-11T16:26:34,550 DEBUG [Time-limited test {}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. after waiting 0 ms 2024-11-11T16:26:34,550 DEBUG [Time-limited test {}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 2024-11-11T16:26:34,551 INFO [Time-limited test {}] regionserver.HRegion(1973): Closed testReplayEditsWrittenViaHRegion,,1731342389947.72312c8c240f28613946b9d26d522498. 
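Aside: the stretch above covers the full recovery path for region 72312c8c240f28613946b9d26d522498: the WAL is split into recovered.edits/0000000000000000066, the region reopens, replays 30 edits into families a, b and c, flushes them, and closes. The sketch below is a minimal, hypothetical picture of the client-side writes that produce edits like these, using the public HBase client API; the table name and qualifier mirror the log, the row key and value are placeholders, and it is not the test's actual code.

// Illustrative sketch only: writes a few cells to column families "a", "b"
// and "c", mirroring the edits that the WAL split and replay above recover.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReplayEditsWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // assumes a reachable cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testReplayEditsWrittenViaHRegion"))) {
      for (String family : new String[] {"a", "b", "c"}) {
        Put put = new Put(Bytes.toBytes("testReplayEditsWrittenViaHRegion")); // placeholder row key
        // "y0" matches the qualifier visible in the flush messages above; the value is a placeholder.
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("y0"), Bytes.toBytes("value"));
        table.put(put);
      }
    }
    // If the region's WAL is later split, edits like these surface as
    // recovered.edits/<seqid> files and are re-applied when the region opens,
    // which is the sequence the log records above.
  }
}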
2024-11-11T16:26:34,551 DEBUG [Time-limited test {}] regionserver.HRegion(1676): Region close journal for 72312c8c240f28613946b9d26d522498: Waiting for close lock at 1731342394549Disabling compacts and flushes for region at 1731342394549Disabling writes for close at 1731342394550 (+1 ms)Writing region close event to WAL at 1731342394551 (+1 ms)Closed at 1731342394551 2024-11-11T16:26:34,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741939_1119 (size=95) 2024-11-11T16:26:34,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741939_1119 (size=95) 2024-11-11T16:26:34,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741939_1119 (size=95) 2024-11-11T16:26:34,555 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /hbase/oldWALs 2024-11-11T16:26:34,555 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL wal:(num 1731342394390) 2024-11-11T16:26:34,570 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestAsyncWALReplayValueCompression#testReplayEditsWrittenViaHRegion Thread=438 (was 434) Potentially hanging thread: IPC Client (1319272049) connection to localhost/127.0.0.1:39605 from jenkinstestReplayEditsWrittenViaHRegion java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: AsyncFSWAL-28-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-166485236_22 at /127.0.0.1:48884 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkinstestReplayEditsWrittenViaHRegion@localhost:39605 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: AsyncFSWAL-28-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-166485236_22 at /127.0.0.1:42062 [Waiting for operation #11] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-166485236_22 at /127.0.0.1:35958 [Waiting for operation #22] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=1392 (was 1330) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=625 (was 654), ProcessCount=11 (was 11), AvailableMemoryMB=2020 (was 2059) 2024-11-11T16:26:34,570 WARN [Time-limited test {}] hbase.ResourceChecker(130): OpenFileDescriptor=1392 is superior to 1024 2024-11-11T16:26:34,570 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T16:26:34,570 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
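Aside: the "Thread=438 (was 434) ... Thread LEAK?" summary above is HBase's per-test resource accounting, which snapshots thread and file-descriptor counts before and after each test. The sketch below shows the same before/after idea with plain JDK thread counting; it is a generic illustration, not HBase's ResourceChecker implementation, and the worker thread is made up.

// Generic illustration of the before/after accounting that produces lines like
// "Thread=438 (was 434)"; plain JDK counting, not HBase's ResourceChecker.
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

public class ThreadCountSketch {
  public static void main(String[] args) {
    ThreadMXBean threads = ManagementFactory.getThreadMXBean();
    int before = threads.getThreadCount();

    // Stand-in for a test body: start something that may outlive the test.
    Thread worker = new Thread(() -> {
      try { Thread.sleep(5_000); } catch (InterruptedException ignored) { }
    }, "possibly-leaked-worker");
    worker.setDaemon(true);
    worker.start();

    int after = threads.getThreadCount();
    System.out.printf("Thread=%d (was %d)%s%n",
        after, before, after > before ? " - Thread LEAK?" : "");
  }
}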
2024-11-11T16:26:34,570 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:26:34,570 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:26:34,571 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:26:34,571 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
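Aside: the call stack above runs from the JUnit tearDownAfterClass hook down into HBaseTestingUtil.shutdownMiniCluster. The sketch below outlines that lifecycle as a bare JUnit 4 test class; it follows the HBaseTestingUtility/HBaseTestingUtil lineage, the exact start-up signature is an assumption, and it is only an outline of what AbstractTestWALReplay does, not its actual setup.

// Sketch of the lifecycle visible in the teardown stack trace above: start an
// HBase minicluster once per test class and shut it down afterwards.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Starts an in-process master, region servers, ZooKeeper and HDFS.
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Produces the "Shutting down minicluster" / "Cluster shutdown requested"
    // messages seen in the surrounding log.
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testSomethingAgainstTheCluster() throws Exception {
    // test body omitted
  }
}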
2024-11-11T16:26:34,571 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T16:26:34,571 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=524162917, stopped=false 2024-11-11T16:26:34,572 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=16b413a53992,40215,1731342348830 2024-11-11T16:26:34,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T16:26:34,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T16:26:34,574 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T16:26:34,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:26:34,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:26:34,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T16:26:34,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:26:34,574 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T16:26:34,575 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:26:34,575 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:26:34,575 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:26:34,575 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:26:34,575 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:26:34,575 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b413a53992,43519,1731342349897' ***** 2024-11-11T16:26:34,575 INFO [Time-limited test {}] 
regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T16:26:34,575 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b413a53992,42465,1731342350046' ***** 2024-11-11T16:26:34,575 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T16:26:34,575 INFO [RS:0;16b413a53992:43519 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T16:26:34,575 INFO [RS:1;16b413a53992:42465 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T16:26:34,576 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T16:26:34,576 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T16:26:34,576 INFO [RS:1;16b413a53992:42465 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T16:26:34,576 INFO [RS:1;16b413a53992:42465 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T16:26:34,576 INFO [RS:0;16b413a53992:43519 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T16:26:34,576 INFO [RS:0;16b413a53992:43519 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T16:26:34,576 INFO [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(3091): Received CLOSE for 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:34,576 INFO [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(959): stopping server 16b413a53992,43519,1731342349897 2024-11-11T16:26:34,576 INFO [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(959): stopping server 16b413a53992,42465,1731342350046 2024-11-11T16:26:34,576 INFO [RS:1;16b413a53992:42465 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:26:34,576 INFO [RS:0;16b413a53992:43519 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:26:34,576 INFO [RS:1;16b413a53992:42465 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;16b413a53992:42465. 2024-11-11T16:26:34,576 INFO [RS:0;16b413a53992:43519 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;16b413a53992:43519. 
2024-11-11T16:26:34,576 DEBUG [RS:1;16b413a53992:42465 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:26:34,576 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9e5cca078c8f306f6c3dea9fad229919, disabling compactions & flushes 2024-11-11T16:26:34,576 DEBUG [RS:0;16b413a53992:43519 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:26:34,576 DEBUG [RS:1;16b413a53992:42465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:26:34,577 DEBUG [RS:0;16b413a53992:43519 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:26:34,577 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
2024-11-11T16:26:34,577 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:34,577 INFO [regionserver/16b413a53992:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:26:34,577 INFO [regionserver/16b413a53992:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:26:34,577 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. after waiting 0 ms 2024-11-11T16:26:34,577 INFO [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T16:26:34,577 INFO [RS:0;16b413a53992:43519 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T16:26:34,577 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:34,577 INFO [RS:0;16b413a53992:43519 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T16:26:34,577 DEBUG [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(1325): Online Regions={9e5cca078c8f306f6c3dea9fad229919=testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919.} 2024-11-11T16:26:34,577 INFO [RS:0;16b413a53992:43519 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-11T16:26:34,577 DEBUG [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(1351): Waiting on 9e5cca078c8f306f6c3dea9fad229919 2024-11-11T16:26:34,577 INFO [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T16:26:34,577 INFO [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T16:26:34,577 DEBUG [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T16:26:34,577 DEBUG [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T16:26:34,577 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T16:26:34,577 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T16:26:34,577 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T16:26:34,577 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T16:26:34,577 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T16:26:34,578 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.19 KB heapSize=2.79 KB 2024-11-11T16:26:34,582 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/default/testReplayEditsAfterRegionMovedWithMultiCF/9e5cca078c8f306f6c3dea9fad229919/recovered.edits/20.seqid, newMaxSeqId=20, maxSeqId=17 2024-11-11T16:26:34,582 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 2024-11-11T16:26:34,583 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9e5cca078c8f306f6c3dea9fad229919: Waiting for close lock at 1731342394576Running coprocessor pre-close hooks at 1731342394576Disabling compacts and flushes for region at 1731342394576Disabling writes for close at 1731342394577 (+1 ms)Writing region close event to WAL at 1731342394578 (+1 ms)Running coprocessor post-close hooks at 1731342394582 (+4 ms)Closed at 1731342394582 2024-11-11T16:26:34,583 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919. 
2024-11-11T16:26:34,583 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/info/3e153607294e48e2935693ed94e0f38f is 205, key is testReplayEditsAfterRegionMovedWithMultiCF,,1731342368307.9e5cca078c8f306f6c3dea9fad229919./info:regioninfo/1731342373049/Put/seqid=0 2024-11-11T16:26:34,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741943_1123 (size=6778) 2024-11-11T16:26:34,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741943_1123 (size=6778) 2024-11-11T16:26:34,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741943_1123 (size=6778) 2024-11-11T16:26:34,590 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.19 KB at sequenceid=23 (bloomFilter=true), to=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/info/3e153607294e48e2935693ed94e0f38f 2024-11-11T16:26:34,595 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/.tmp/info/3e153607294e48e2935693ed94e0f38f as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/info/3e153607294e48e2935693ed94e0f38f 2024-11-11T16:26:34,600 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/info/3e153607294e48e2935693ed94e0f38f, entries=8, sequenceid=23, filesize=6.6 K 2024-11-11T16:26:34,601 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.19 KB/1218, heapSize ~2.02 KB/2072, currentSize=0 B/0 for 1588230740 in 24ms, sequenceid=23, compaction requested=false 2024-11-11T16:26:34,601 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T16:26:34,606 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/data/hbase/meta/1588230740/recovered.edits/26.seqid, newMaxSeqId=26, maxSeqId=18 2024-11-11T16:26:34,607 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T16:26:34,607 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T16:26:34,607 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731342394577Running 
coprocessor pre-close hooks at 1731342394577Disabling compacts and flushes for region at 1731342394577Disabling writes for close at 1731342394577Obtaining lock to block concurrent updates at 1731342394578 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731342394578Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1218, getHeapSize=2792, getOffHeapSize=0, getCellsCount=8 at 1731342394578Flushing stores of hbase:meta,,1.1588230740 at 1731342394578Flushing 1588230740/info: creating writer at 1731342394579 (+1 ms)Flushing 1588230740/info: appending metadata at 1731342394583 (+4 ms)Flushing 1588230740/info: closing flushed file at 1731342394583Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20cb3ef8: reopening flushed file at 1731342394595 (+12 ms)Finished flush of dataSize ~1.19 KB/1218, heapSize ~2.02 KB/2072, currentSize=0 B/0 for 1588230740 in 24ms, sequenceid=23, compaction requested=false at 1731342394601 (+6 ms)Writing region close event to WAL at 1731342394603 (+2 ms)Running coprocessor post-close hooks at 1731342394607 (+4 ms)Closed at 1731342394607 2024-11-11T16:26:34,607 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T16:26:34,777 INFO [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(976): stopping server 16b413a53992,42465,1731342350046; all regions closed. 2024-11-11T16:26:34,777 INFO [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(976): stopping server 16b413a53992,43519,1731342349897; all regions closed. 2024-11-11T16:26:34,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741835_1011 (size=721) 2024-11-11T16:26:34,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741835_1011 (size=721) 2024-11-11T16:26:34,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741893_1071 (size=1674) 2024-11-11T16:26:34,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741893_1071 (size=1674) 2024-11-11T16:26:34,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741893_1071 (size=1674) 2024-11-11T16:26:34,783 DEBUG [RS:0;16b413a53992:43519 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/oldWALs 2024-11-11T16:26:34,783 INFO [RS:0;16b413a53992:43519 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 16b413a53992%2C43519%2C1731342349897.meta:.meta(num 1731342372027) 2024-11-11T16:26:34,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741834_1010 (size=1711) 2024-11-11T16:26:34,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741834_1010 (size=1711) 2024-11-11T16:26:34,786 DEBUG [RS:1;16b413a53992:42465 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/oldWALs 2024-11-11T16:26:34,786 INFO [RS:1;16b413a53992:42465 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 
16b413a53992%2C42465%2C1731342350046:(num 1731342352145) 2024-11-11T16:26:34,786 DEBUG [RS:1;16b413a53992:42465 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:26:34,786 INFO [RS:1;16b413a53992:42465 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:26:34,786 INFO [RS:1;16b413a53992:42465 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:26:34,787 INFO [RS:1;16b413a53992:42465 {}] hbase.ChoreService(370): Chore service for: regionserver/16b413a53992:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T16:26:34,787 INFO [RS:1;16b413a53992:42465 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T16:26:34,787 INFO [RS:1;16b413a53992:42465 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T16:26:34,787 INFO [RS:1;16b413a53992:42465 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T16:26:34,787 INFO [RS:1;16b413a53992:42465 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:26:34,787 INFO [regionserver/16b413a53992:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T16:26:34,787 INFO [RS:1;16b413a53992:42465 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42465 2024-11-11T16:26:34,788 DEBUG [RS:0;16b413a53992:43519 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/oldWALs 2024-11-11T16:26:34,788 INFO [RS:0;16b413a53992:43519 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 16b413a53992%2C43519%2C1731342349897:(num 1731342352137) 2024-11-11T16:26:34,788 DEBUG [RS:0;16b413a53992:43519 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:26:34,788 INFO [RS:0;16b413a53992:43519 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:26:34,788 INFO [RS:0;16b413a53992:43519 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:26:34,788 INFO [RS:0;16b413a53992:43519 {}] hbase.ChoreService(370): Chore service for: regionserver/16b413a53992:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T16:26:34,788 INFO [RS:0;16b413a53992:43519 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:26:34,789 INFO [RS:0;16b413a53992:43519 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43519 2024-11-11T16:26:34,789 INFO [regionserver/16b413a53992:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T16:26:34,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T16:26:34,790 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b413a53992,42465,1731342350046 2024-11-11T16:26:34,790 INFO [RS:1;16b413a53992:42465 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T16:26:34,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b413a53992,43519,1731342349897 2024-11-11T16:26:34,791 INFO [RS:0;16b413a53992:43519 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T16:26:34,792 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b413a53992,43519,1731342349897] 2024-11-11T16:26:34,794 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b413a53992,43519,1731342349897 already deleted, retry=false 2024-11-11T16:26:34,794 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b413a53992,43519,1731342349897 expired; onlineServers=1 2024-11-11T16:26:34,794 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b413a53992,42465,1731342350046] 2024-11-11T16:26:34,795 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b413a53992,42465,1731342350046 already deleted, retry=false 2024-11-11T16:26:34,795 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b413a53992,42465,1731342350046 expired; onlineServers=0 2024-11-11T16:26:34,795 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '16b413a53992,40215,1731342348830' ***** 2024-11-11T16:26:34,795 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T16:26:34,795 INFO [M:0;16b413a53992:40215 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:26:34,795 INFO [M:0;16b413a53992:40215 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:26:34,795 DEBUG [M:0;16b413a53992:40215 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T16:26:34,796 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T16:26:34,796 DEBUG [M:0;16b413a53992:40215 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T16:26:34,796 DEBUG [master/16b413a53992:0:becomeActiveMaster-HFileCleaner.large.0-1731342351606 {}] cleaner.HFileCleaner(306): Exit Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.large.0-1731342351606,5,FailOnTimeoutGroup] 2024-11-11T16:26:34,796 DEBUG [master/16b413a53992:0:becomeActiveMaster-HFileCleaner.small.0-1731342351609 {}] cleaner.HFileCleaner(306): Exit Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.small.0-1731342351609,5,FailOnTimeoutGroup] 2024-11-11T16:26:34,796 INFO [M:0;16b413a53992:40215 {}] hbase.ChoreService(370): Chore service for: master/16b413a53992:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T16:26:34,796 INFO [M:0;16b413a53992:40215 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:26:34,796 DEBUG [M:0;16b413a53992:40215 {}] master.HMaster(1795): Stopping service threads 2024-11-11T16:26:34,796 INFO [M:0;16b413a53992:40215 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T16:26:34,796 INFO [M:0;16b413a53992:40215 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T16:26:34,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T16:26:34,797 INFO [M:0;16b413a53992:40215 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T16:26:34,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:26:34,797 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-11T16:26:34,798 DEBUG [M:0;16b413a53992:40215 {}] zookeeper.ZKUtil(347): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T16:26:34,798 WARN [M:0;16b413a53992:40215 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T16:26:34,798 INFO [M:0;16b413a53992:40215 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/.lastflushedseqids 2024-11-11T16:26:34,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741944_1124 (size=119) 2024-11-11T16:26:34,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741944_1124 (size=119) 2024-11-11T16:26:34,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741944_1124 (size=119) 2024-11-11T16:26:34,812 INFO [M:0;16b413a53992:40215 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T16:26:34,812 INFO [M:0;16b413a53992:40215 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T16:26:34,812 DEBUG [M:0;16b413a53992:40215 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T16:26:34,812 INFO [M:0;16b413a53992:40215 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:26:34,812 DEBUG [M:0;16b413a53992:40215 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:26:34,812 DEBUG [M:0;16b413a53992:40215 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T16:26:34,812 DEBUG [M:0;16b413a53992:40215 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T16:26:34,812 INFO [M:0;16b413a53992:40215 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=83.40 KB heapSize=102.70 KB 2024-11-11T16:26:34,828 DEBUG [M:0;16b413a53992:40215 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10f34f1773be4765b493517b2f86da48 is 82, key is hbase:meta,,1/info:regioninfo/1731342372232/Put/seqid=0 2024-11-11T16:26:34,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741945_1125 (size=6063) 2024-11-11T16:26:34,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741945_1125 (size=6063) 2024-11-11T16:26:34,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741945_1125 (size=6063) 2024-11-11T16:26:34,835 INFO [M:0;16b413a53992:40215 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1008 B at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10f34f1773be4765b493517b2f86da48 2024-11-11T16:26:34,854 DEBUG [M:0;16b413a53992:40215 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/03c18b9a46c3483c8e2c7b88dfa2ac60 is 1076, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731342369177/Put/seqid=0 2024-11-11T16:26:34,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741946_1126 (size=7907) 2024-11-11T16:26:34,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741946_1126 (size=7907) 2024-11-11T16:26:34,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741946_1126 (size=7907) 2024-11-11T16:26:34,862 INFO [M:0;16b413a53992:40215 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=82.17 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/03c18b9a46c3483c8e2c7b88dfa2ac60 2024-11-11T16:26:34,867 INFO [M:0;16b413a53992:40215 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 03c18b9a46c3483c8e2c7b88dfa2ac60 2024-11-11T16:26:34,882 DEBUG [M:0;16b413a53992:40215 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/716b31c59f4a4f6abc21ae5a2648fe37 is 69, key is 16b413a53992,42465,1731342350046/rs:state/1731342351693/Put/seqid=0 2024-11-11T16:26:34,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32929 is added to blk_1073741947_1127 (size=5440) 2024-11-11T16:26:34,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741947_1127 (size=5440) 2024-11-11T16:26:34,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741947_1127 (size=5440) 2024-11-11T16:26:34,890 INFO [M:0;16b413a53992:40215 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=249 B at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/716b31c59f4a4f6abc21ae5a2648fe37 2024-11-11T16:26:34,892 INFO [RS:1;16b413a53992:42465 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T16:26:34,892 INFO [RS:1;16b413a53992:42465 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b413a53992,42465,1731342350046; zookeeper connection closed. 2024-11-11T16:26:34,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:26:34,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42465-0x1002fa9b94b0002, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:26:34,893 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@12a39bca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@12a39bca 2024-11-11T16:26:34,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:26:34,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43519-0x1002fa9b94b0001, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:26:34,893 INFO [RS:0;16b413a53992:43519 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T16:26:34,893 INFO [RS:0;16b413a53992:43519 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b413a53992,43519,1731342349897; zookeeper connection closed. 
2024-11-11T16:26:34,893 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@769ad840 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@769ad840 2024-11-11T16:26:34,894 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-11T16:26:34,895 INFO [M:0;16b413a53992:40215 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 716b31c59f4a4f6abc21ae5a2648fe37 2024-11-11T16:26:34,896 DEBUG [M:0;16b413a53992:40215 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10f34f1773be4765b493517b2f86da48 as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/10f34f1773be4765b493517b2f86da48 2024-11-11T16:26:34,901 INFO [M:0;16b413a53992:40215 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/10f34f1773be4765b493517b2f86da48, entries=14, sequenceid=207, filesize=5.9 K 2024-11-11T16:26:34,902 DEBUG [M:0;16b413a53992:40215 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/03c18b9a46c3483c8e2c7b88dfa2ac60 as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/03c18b9a46c3483c8e2c7b88dfa2ac60 2024-11-11T16:26:34,907 INFO [M:0;16b413a53992:40215 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 03c18b9a46c3483c8e2c7b88dfa2ac60 2024-11-11T16:26:34,907 INFO [M:0;16b413a53992:40215 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/03c18b9a46c3483c8e2c7b88dfa2ac60, entries=21, sequenceid=207, filesize=7.7 K 2024-11-11T16:26:34,908 DEBUG [M:0;16b413a53992:40215 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/716b31c59f4a4f6abc21ae5a2648fe37 as hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/716b31c59f4a4f6abc21ae5a2648fe37 2024-11-11T16:26:34,913 INFO [M:0;16b413a53992:40215 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 716b31c59f4a4f6abc21ae5a2648fe37 2024-11-11T16:26:34,913 INFO [M:0;16b413a53992:40215 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39605/user/jenkins/test-data/3efeeae3-2500-68d0-5b6a-fa67d64bf553/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/716b31c59f4a4f6abc21ae5a2648fe37, entries=3, sequenceid=207, filesize=5.3 K 2024-11-11T16:26:34,914 INFO [M:0;16b413a53992:40215 {}] regionserver.HRegion(3140): Finished flush of dataSize ~83.40 KB/85398, heapSize ~102.41 KB/104864, currentSize=0 B/0 for 
1595e783b53d99cd5eef43b6debb2682 in 102ms, sequenceid=207, compaction requested=false 2024-11-11T16:26:34,915 INFO [M:0;16b413a53992:40215 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:26:34,915 DEBUG [M:0;16b413a53992:40215 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731342394812Disabling compacts and flushes for region at 1731342394812Disabling writes for close at 1731342394812Obtaining lock to block concurrent updates at 1731342394812Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731342394812Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=85398, getHeapSize=105104, getOffHeapSize=0, getCellsCount=248 at 1731342394812Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731342394813 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731342394813Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731342394828 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731342394828Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731342394839 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731342394854 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731342394854Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731342394867 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731342394881 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731342394881Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28482cb0: reopening flushed file at 1731342394896 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6199b9f7: reopening flushed file at 1731342394902 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c361a7b: reopening flushed file at 1731342394907 (+5 ms)Finished flush of dataSize ~83.40 KB/85398, heapSize ~102.41 KB/104864, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 102ms, sequenceid=207, compaction requested=false at 1731342394914 (+7 ms)Writing region close event to WAL at 1731342394915 (+1 ms)Closed at 1731342394915 2024-11-11T16:26:34,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41813 is added to blk_1073741830_1006 (size=69567) 2024-11-11T16:26:34,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40903 is added to blk_1073741830_1006 (size=69567) 2024-11-11T16:26:34,919 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T16:26:34,919 INFO [M:0;16b413a53992:40215 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-11T16:26:34,919 INFO [M:0;16b413a53992:40215 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40215 2024-11-11T16:26:34,920 INFO [M:0;16b413a53992:40215 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T16:26:35,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:26:35,022 INFO [M:0;16b413a53992:40215 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T16:26:35,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40215-0x1002fa9b94b0000, quorum=127.0.0.1:59036, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:26:35,029 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342384663 with renewLeaseKey: DEFAULT_16688 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342384663 (inode 16688) Holder DFSClient_NONMAPREDUCE_1961639719_22 does not have any open files. at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterabortingflush-manual,16010,1731342375157/wal.1731342384663 (inode 16688) Holder DFSClient_NONMAPREDUCE_1961639719_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-11-11T16:26:35,032 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342374873 with renewLeaseKey: DEFAULT_16665 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342374873 (inode 16665) Holder DFSClient_NONMAPREDUCE_1961639719_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testreplayeditsafterpartialflush-manual,16010,1731342373536/wal.1731342374873 (inode 16665) Holder DFSClient_NONMAPREDUCE_1961639719_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-11-11T16:26:35,033 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testregionmadeofbulkloadedfilesonly-manual,16010,1731342363684/wal.1731342363812 with renewLeaseKey: DEFAULT_16586 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:35,035 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342385394 with renewLeaseKey: DEFAULT_16714 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342385394 (inode 16714) Holder DFSClient_NONMAPREDUCE_1961639719_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testdatalosswheninputerror-manual,16010,1731342384897/wal.1731342385394 (inode 16714) Holder DFSClient_NONMAPREDUCE_1961639719_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-11-11T16:26:35,036 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testcompactedbulkloadedfiles-manual,16010,1731342385539/wal.1731342385595 with renewLeaseKey: DEFAULT_16736 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:35,036 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenviahregion-manual,16010,1731342389946/wal.1731342390169 with renewLeaseKey: DEFAULT_16777 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:35,036 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testreplayeditswrittenintowal-manual,16010,1731342354711/wal.1731342354983 with renewLeaseKey: DEFAULT_16506 java.io.IOException: stream already broken at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.endBlock(FanOutOneBlockAsyncDFSOutput.java:566) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:615) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:26:35,038 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal.1731342354482 with renewLeaseKey: DEFAULT_16485 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal.1731342354482 (inode 16485) Holder DFSClient_NONMAPREDUCE_1961639719_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit1-manual,16010,1731342354039/wal.1731342354482 (inode 16485) Holder DFSClient_NONMAPREDUCE_1961639719_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-11-11T16:26:35,041 ERROR [Time-limited test {}] hdfs.DFSClient(665): Failed to close file: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal.1731342353660 with renewLeaseKey: DEFAULT_16462 java.io.FileNotFoundException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal.1731342353660 (inode 16462) Holder DFSClient_NONMAPREDUCE_1961639719_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:110) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:653) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.close(FanOutOneBlockAsyncDFSOutput.java:619) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hdfs.DummyDFSOutputStream.close(DummyDFSOutputStream.java:52) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:662) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:699) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1528) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2194) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) ~[hadoop-hdfs-3.4.1-tests.jar:?] at org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) ~[hadoop-hdfs-3.4.1-tests.jar:?] 
at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) ~[test-classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.tearDownAfterClass(AbstractTestWALReplay.java:153) ~[test-classes/:?] at org.apache.hadoop.hbase.regionserver.wal.TestAsyncWALReplay.tearDownAfterClass(TestAsyncWALReplay.java:67) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /hbase/WALs/testnameconflictwhensplit0-manual,16010,1731342353298/wal.1731342353660 (inode 16462) Holder DFSClient_NONMAPREDUCE_1961639719_22 does not have any open files. 
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3188) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFileInternal(FSDirWriteFileOp.java:703) at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.completeFile(FSDirWriteFileOp.java:689) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3232) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:983) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:649) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$complete$13(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:532) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.complete(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor3.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.complete(Unknown Source) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile(FanOutOneBlockAsyncDFSOutputHelper.java:646) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 25 more 2024-11-11T16:26:35,045 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35f1150e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T16:26:35,048 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13a77e13{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T16:26:35,048 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T16:26:35,048 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@f9972d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T16:26:35,048 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a6d5e13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/hadoop.log.dir/,STOPPED} 2024-11-11T16:26:35,051 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T16:26:35,051 WARN [BP-1916425677-172.17.0.2-1731342345074 heartbeating to localhost/127.0.0.1:39605 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 
2024-11-11T16:26:35,051 WARN [BP-1916425677-172.17.0.2-1731342345074 heartbeating to localhost/127.0.0.1:39605 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1916425677-172.17.0.2-1731342345074 (Datanode Uuid 70355d25-5036-47bb-bb7d-eed18d16b805) service to localhost/127.0.0.1:39605 
2024-11-11T16:26:35,051 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 
2024-11-11T16:26:35,052 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data5/current/BP-1916425677-172.17.0.2-1731342345074 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
2024-11-11T16:26:35,053 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data6/current/BP-1916425677-172.17.0.2-1731342345074 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
2024-11-11T16:26:35,053 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 
2024-11-11T16:26:35,055 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7bd427b8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 
2024-11-11T16:26:35,056 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6915083f{HTTP/1.1, (http/1.1)}{localhost:0} 
2024-11-11T16:26:35,056 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 
2024-11-11T16:26:35,056 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cc2d6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 
2024-11-11T16:26:35,056 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46b092e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/hadoop.log.dir/,STOPPED} 
2024-11-11T16:26:35,057 WARN [BP-1916425677-172.17.0.2-1731342345074 heartbeating to localhost/127.0.0.1:39605 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 
2024-11-11T16:26:35,057 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T16:26:35,057 WARN [BP-1916425677-172.17.0.2-1731342345074 heartbeating to localhost/127.0.0.1:39605 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1916425677-172.17.0.2-1731342345074 (Datanode Uuid cee060b1-9d20-4042-8c32-8327c5728766) service to localhost/127.0.0.1:39605 
2024-11-11T16:26:35,057 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 
2024-11-11T16:26:35,058 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data3/current/BP-1916425677-172.17.0.2-1731342345074 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
2024-11-11T16:26:35,059 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data4/current/BP-1916425677-172.17.0.2-1731342345074 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
2024-11-11T16:26:35,059 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 
2024-11-11T16:26:35,061 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@330740de{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 
2024-11-11T16:26:35,061 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7b24cab9{HTTP/1.1, (http/1.1)}{localhost:0} 
2024-11-11T16:26:35,061 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 
2024-11-11T16:26:35,061 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a359997{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 
2024-11-11T16:26:35,061 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cf5a85e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/hadoop.log.dir/,STOPPED} 
2024-11-11T16:26:35,063 WARN [BP-1916425677-172.17.0.2-1731342345074 heartbeating to localhost/127.0.0.1:39605 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 
2024-11-11T16:26:35,063 WARN [BP-1916425677-172.17.0.2-1731342345074 heartbeating to localhost/127.0.0.1:39605 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1916425677-172.17.0.2-1731342345074 (Datanode Uuid b2549184-d599-471d-b3b3-6438de864e70) service to localhost/127.0.0.1:39605 
2024-11-11T16:26:35,063 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data1/current/BP-1916425677-172.17.0.2-1731342345074 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
2024-11-11T16:26:35,064 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/cluster_8d45aafd-7d3f-1e8c-d15d-b9458076554b/data/data2/current/BP-1916425677-172.17.0.2-1731342345074 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
2024-11-11T16:26:35,064 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T16:26:35,064 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 
2024-11-11T16:26:35,064 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 
2024-11-11T16:26:35,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3717288f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 
2024-11-11T16:26:35,071 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4bd70930{HTTP/1.1, (http/1.1)}{localhost:0} 
2024-11-11T16:26:35,071 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 
2024-11-11T16:26:35,071 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dc9d5c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 
2024-11-11T16:26:35,071 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f37ffca{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/31d01fd1-fe23-b20a-4aa8-ffb4b049d273/hadoop.log.dir/,STOPPED} 
2024-11-11T16:26:35,081 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 
2024-11-11T16:26:35,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down